/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/mte.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
#include <asm/tlbflush.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap
 */
#define VMALLOC_START		(MODULES_END)
#if VA_BITS == VA_BITS_MIN
#define VMALLOC_END		(VMEMMAP_START - SZ_8M)
#else
#define VMEMMAP_UNUSED_NPAGES	((_PAGE_OFFSET(vabits_actual) - PAGE_OFFSET) >> PAGE_SHIFT)
#define VMALLOC_END		(VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M)
#endif

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <asm/por.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/page_table_check.h>

static inline void emit_pte_barriers(void)
{
	/*
	 * These barriers are emitted under certain conditions after a pte entry
	 * was modified (see e.g. __set_pte_complete()). The dsb makes the store
	 * visible to the table walker. The isb ensures that any previous
	 * speculative "invalid translation" marker that is in the CPU's
	 * pipeline gets cleared, so that any access to that address after
	 * setting the pte to valid won't cause a spurious fault. If the thread
	 * gets preempted after storing to the pgtable but before emitting these
	 * barriers, __switch_to() emits a dsb which ensures the walker gets to
	 * see the store. There is no guarantee of an isb being issued though.
	 * This is safe because it will still get issued (albeit on a
	 * potentially different CPU) when the thread starts running again,
	 * before any access to the address.
	 */
	dsb(ishst);
	isb();
}

static inline void queue_pte_barriers(void)
{
	unsigned long flags;

	if (in_interrupt()) {
		emit_pte_barriers();
		return;
	}

	flags = read_thread_flags();

	if (flags & BIT(TIF_LAZY_MMU)) {
		/* Avoid the atomic op if already set. */
		if (!(flags & BIT(TIF_LAZY_MMU_PENDING)))
			set_thread_flag(TIF_LAZY_MMU_PENDING);
	} else {
		emit_pte_barriers();
	}
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	/*
	 * lazy_mmu_mode is not supposed to permit nesting. But in practice this
	 * does happen with CONFIG_DEBUG_PAGEALLOC, where a page allocation
	 * inside a lazy_mmu_mode section (such as zap_pte_range()) will change
	 * permissions on the linear map with apply_to_page_range(), which
	 * re-enters lazy_mmu_mode. So we tolerate nesting in our
	 * implementation. The first call to arch_leave_lazy_mmu_mode() will
	 * flush and clear the flag such that the remainder of the work in the
	 * outer nest behaves as if outside of lazy mmu mode. This is safe and
	 * keeps tracking simple.
	 */

	if (in_interrupt())
		return;

	set_thread_flag(TIF_LAZY_MMU);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	if (in_interrupt())
		return;

	if (test_and_clear_thread_flag(TIF_LAZY_MMU_PENDING))
		emit_pte_barriers();
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	if (in_interrupt())
		return;

	arch_flush_lazy_mmu_mode();
	clear_thread_flag(TIF_LAZY_MMU);
}
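
/*
 * Illustrative sketch only (hypothetical caller, not part of this header's
 * API): how the lazy MMU hooks above defer the dsb/isb pair across a batch
 * of kernel pte updates. The loop bounds and pte values are made up.
 *
 *	arch_enter_lazy_mmu_mode();		// sets TIF_LAZY_MMU
 *	for (i = 0; i < nr; i++)
 *		__set_pte(ptep + i, pte);	// for valid kernel ptes this
 *						// only sets TIF_LAZY_MMU_PENDING
 *	arch_leave_lazy_mmu_mode();		// one dsb(ishst)/isb pair for
 *						// the whole batch, flags cleared
 */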

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
#define flush_pud_tlb_range(vma, addr, end)	\
	__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Outside of a few very special situations (e.g. hibernation), we always
 * use broadcast TLB invalidation instructions, therefore a spurious page
 * fault on one CPU which has been handled concurrently by another CPU
 * does not need to perform additional invalidation.
 */
#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(e)	\
	pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))

#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	pte_val(pte) &= ~PTE_MAYBE_SHARED;
	return (pte_val(pte) & PTE_ADDR_LOW) |
		((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
}
static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
}
#else
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
	return pte_val(pte) & PTE_ADDR_LOW;
}

static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
{
	return phys;
}
#endif

#define pte_pfn(pte)		(__pte_to_phys(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	\
	__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(pte)		(!pte_val(pte))
#define __pte_clear(mm, addr, ptep) \
	__set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(pte_valid(pte) || pte_present_invalid(pte))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_rdonly(pte)		(!!(pte_val(pte) & PTE_RDONLY))
#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
#define pte_tagged(pte)		((pte_val(pte) & PTE_ATTRINDX_MASK) == \
				 PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})
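
/*
 * Worked example (assuming 4K pages, where CONT_PTE_SIZE is 64K): for
 * addr = 0x12000 and end = 0x50000, pte_cont_addr_end() returns 0x20000,
 * the next 64K boundary, so callers can iterate one contpte block at a
 * time. The "- 1" comparisons keep the macro correct when end is at the
 * very top of the address space and (addr + CONT_PTE_SIZE) wraps to 0.
 */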

#define pte_hw_dirty(pte)	(pte_write(pte) && !pte_rdonly(pte))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_present_invalid(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_PRESENT_INVALID)) == PTE_PRESENT_INVALID)
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
/*
 * Returns true if the pte is valid and has the contiguous bit set.
 */
#define pte_valid_cont(pte)	(pte_valid(pte) && pte_cont(pte))
/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 * Note that we can't make any assumptions based on the state of the access
 * flag, since __ptep_clear_flush_young() elides a DSB when invalidating the
 * TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))

static inline bool por_el0_allows_pkey(u8 pkey, bool write, bool execute)
{
	u64 por;

	if (!system_supports_poe())
		return true;

	por = read_sysreg_s(SYS_POR_EL0);

	if (write)
		return por_elx_allows_write(por, pkey);

	if (execute)
		return por_elx_allows_exec(por, pkey);

	return por_elx_allows_read(por, pkey);
}

/*
 * p??_access_permitted() is true for valid user mappings (PTE_USER bit set,
 * subject to the write permission check). Execute-only mappings, such as
 * PROT_EXEC with EPAN (both the PTE_USER and PTE_UXN bits clear), must
 * return false. PROT_NONE mappings do not have the PTE_VALID bit set.
 */
#define pte_access_permitted_no_overlay(pte, write) \
	(((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
#define pte_access_permitted(pte, write) \
	(pte_access_permitted_no_overlay(pte, write) && \
	por_el0_allows_pkey(FIELD_GET(PTE_PO_IDX_MASK, pte_val(pte)), write, false))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) &= ~pgprot_val(prot);
	return pmd;
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	pmd_val(pmd) |= pgprot_val(prot);
	return pmd;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	if (pte_sw_dirty(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/*
	 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
	 * clear), set the PTE_DIRTY bit.
	 */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkvalid(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pte_t pte_mkinvalid(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_PRESENT_INVALID));
	pte = clear_pte_bit(pte, __pgprot(PTE_VALID));
	return pte;
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline pmd_t pmd_mknoncont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~PMD_SECT_CONT);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & PTE_UFFD_WP);
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_wrprotect(set_pte_bit(pte, __pgprot(PTE_UFFD_WP)));
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline void __set_pte_nosync(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void __set_pte_complete(pte_t pte)
{
	/*
	 * Only needed if the new pte is a valid kernel mapping; otherwise
	 * TLB maintenance has the necessary barriers.
	 */
	if (pte_valid_not_user(pte))
		queue_pte_barriers();
}

static inline void __set_pte(pte_t *ptep, pte_t pte)
{
	__set_pte_nosync(ptep, pte);
	__set_pte_complete(pte);
}

static inline pte_t __ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}

extern void __sync_icache_dcache(pte_t pteval);
bool pgattr_change_is_safe(pteval_t old, pteval_t new);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *     0       0     |      1          0          0
 *     0       1     |      1          1          0
 *     1       0     |      1          0          1
 *     1       1     |      0          1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
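
/*
 * Worked example of the table above (illustrative values): take a writable,
 * hardware-dirty pte, i.e. PTE_WRITE set and PTE_RDONLY clear (last row).
 * pte_wrprotect() must not lose the dirty state when it sets PTE_RDONLY, so
 * it first latches it into the software bit:
 *
 *	pte = pte_wrprotect(pte);
 *	// before: PTE_RDONLY=0, PTE_WRITE=1, PTE_DIRTY=x  (hw-dirty)
 *	// after:  PTE_RDONLY=1, PTE_WRITE=0, PTE_DIRTY=1  (sw-dirty, row 3)
 *
 * pte_dirty() still returns true afterwards, via pte_sw_dirty().
 */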

static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte)
{
	pte_t old_pte;

	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return;

	old_pte = __ptep_get(ptep);

	if (!pte_valid(old_pte) || !pte_valid(pte))
		return;
	if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1)
		return;

	/*
	 * Check for potential race with hardware updates of the pte
	 * (__ptep_set_access_flags safely changes valid ptes without going
	 * through an invalid entry).
	 */
	VM_WARN_ONCE(!pte_young(pte),
		     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
	VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
		     "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
		     __func__, pte_val(old_pte), pte_val(pte));
}

static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte);

	/*
	 * If the PTE would provide user space access to the tags associated
	 * with it then ensure that the MTE tags are synchronised. Although
	 * pte_access_permitted_no_overlay() returns false for exec-only
	 * mappings, they don't expose tags (instruction fetches don't check
	 * tags).
	 */
	if (system_supports_mte() && pte_access_permitted_no_overlay(pte, false) &&
	    !pte_special(pte) && pte_tagged(pte))
		mte_sync_tags(pte, nr_pages);
}

/*
 * Select all bits except the pfn
 */
#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
}

#define pte_advance_pfn pte_advance_pfn
static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
{
	return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte));
}

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pte_t p4d_pte(p4d_t p4d)
{
	return __pte(p4d_val(p4d));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PUD_TYPE_MASK) | PUD_TYPE_SECT);
}

static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

static inline bool pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & PTE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP));
}

static inline int pte_swp_uffd_wp(pte_t pte)
{
	return !!(pte_val(pte) & PTE_SWP_UFFD_WP);
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_SWP_UFFD_WP));
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	/*
	 * pte_present_invalid() tells us that the pte is invalid from the HW
	 * perspective but present from the SW perspective, so the fields are
	 * to be interpreted as per the HW layout. The latter two checks are
	 * the unique encoding that we use for PROT_NONE. It is insufficient
	 * to only use the first check because we share the same encoding
	 * scheme with pmds which support pmd_mkinvalid(), so they can be
	 * present-invalid without being PROT_NONE.
	 */
	return pte_present_invalid(pte) && !pte_user(pte) && !pte_user_exec(pte);
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_valid(pmd)		pte_valid(pmd_pte(pmd))
#define pmd_user(pmd)		pte_user(pmd_pte(pmd))
#define pmd_user_exec(pmd)	pte_user_exec(pmd_pte(pmd))
#define pmd_cont(pmd)		pte_cont(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite_novma(pmd)	pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkinvalid(pmd)	pte_pmd(pte_mkinvalid(pmd_pte(pmd)))
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define pmd_uffd_wp(pmd)	pte_uffd_wp(pmd_pte(pmd))
#define pmd_mkuffd_wp(pmd)	pte_pmd(pte_mkuffd_wp(pmd_pte(pmd)))
#define pmd_clear_uffd_wp(pmd)	pte_pmd(pte_clear_uffd_wp(pmd_pte(pmd)))
#define pmd_swp_uffd_wp(pmd)	pte_swp_uffd_wp(pmd_pte(pmd))
#define pmd_swp_mkuffd_wp(pmd)	pte_pmd(pte_swp_mkuffd_wp(pmd_pte(pmd)))
#define pmd_swp_clear_uffd_wp(pmd) \
				pte_pmd(pte_swp_clear_uffd_wp(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	/*
	 * It's possible that the pmd is present-invalid on entry
	 * and in that case it needs to remain present-invalid on
	 * exit. So ensure the VALID bit does not get modified.
	 */
	pmdval_t mask = PMD_TYPE_MASK & ~PTE_VALID;
	pmdval_t val = PMD_TYPE_SECT & ~PTE_VALID;

	return __pmd((pmd_val(pmd) & ~mask) | val);
}

#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
#define pmd_special(pmd)	(!!(pmd_val(pmd) & PTE_SPECIAL))
static inline pmd_t pmd_mkspecial(pmd_t pmd)
{
	return set_pmd_bit(pmd, __pgprot(PTE_SPECIAL));
}
#endif

#define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
#define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
#define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	__pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pud_young(pud)		pte_young(pud_pte(pud))
#define pud_mkyoung(pud)	pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud)		pte_write(pud_pte(pud))

static inline pud_t pud_mkhuge(pud_t pud)
{
	/*
	 * It's possible that the pud is present-invalid on entry
	 * and in that case it needs to remain present-invalid on
	 * exit. So ensure the VALID bit does not get modified.
	 */
	pudval_t mask = PUD_TYPE_MASK & ~PTE_VALID;
	pudval_t val = PUD_TYPE_SECT & ~PTE_VALID;

	return __pud((pud_val(pud) & ~mask) | val);
}

#define __pud_to_phys(pud)	__pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys)	__phys_to_pte_val(phys)
#define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	return __pgprot(pmd_val(pfn_pmd(pfn, __pgprot(0))) ^ pmd_val(pmd));
}

#define pud_pgprot pud_pgprot
static inline pgprot_t pud_pgprot(pud_t pud)
{
	unsigned long pfn = pud_pfn(pud);

	return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
}

static inline void __set_ptes_anysz(struct mm_struct *mm, pte_t *ptep,
				    pte_t pte, unsigned int nr,
				    unsigned long pgsize)
{
	unsigned long stride = pgsize >> PAGE_SHIFT;

	switch (pgsize) {
	case PAGE_SIZE:
		page_table_check_ptes_set(mm, ptep, pte, nr);
		break;
	case PMD_SIZE:
		page_table_check_pmds_set(mm, (pmd_t *)ptep, pte_pmd(pte), nr);
		break;
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		page_table_check_puds_set(mm, (pud_t *)ptep, pte_pud(pte), nr);
		break;
#endif
	default:
		VM_WARN_ON(1);
	}

	__sync_cache_and_tags(pte, nr * stride);

	for (;;) {
		__check_safe_pte_update(mm, ptep, pte);
		__set_pte_nosync(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = pte_advance_pfn(pte, stride);
	}

	__set_pte_complete(pte);
}
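
/*
 * Sketch of the batching contract (illustrative, hypothetical values): to
 * map nr contiguous pages starting at pfn with the same attributes, a
 * caller builds one pte and lets __set_ptes() advance the pfn per entry:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *
 *	__set_ptes(mm, addr, ptep, pte, nr);	// writes ptep[0..nr-1] with
 *						// pfn, pfn+1, ... pfn+nr-1
 *
 * Only the final __set_pte_complete() emits/queues the barriers, so the
 * cost of the dsb/isb pair is amortised over the whole batch.
 */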

static inline void __set_ptes(struct mm_struct *mm,
			      unsigned long __always_unused addr,
			      pte_t *ptep, pte_t pte, unsigned int nr)
{
	__set_ptes_anysz(mm, ptep, pte, nr, PAGE_SIZE);
}

static inline void __set_pmds(struct mm_struct *mm,
			      unsigned long __always_unused addr,
			      pmd_t *pmdp, pmd_t pmd, unsigned int nr)
{
	__set_ptes_anysz(mm, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE);
}
#define set_pmd_at(mm, addr, pmdp, pmd) __set_pmds(mm, addr, pmdp, pmd, 1)

static inline void __set_puds(struct mm_struct *mm,
			      unsigned long __always_unused addr,
			      pud_t *pudp, pud_t pud, unsigned int nr)
{
	__set_ptes_anysz(mm, (pte_t *)pudp, pud_pte(pud), nr, PUD_SIZE);
}
#define set_pud_at(mm, addr, pudp, pud) __set_puds(mm, addr, pudp, pud, 1)

#define __p4d_to_phys(p4d)	__pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys)	__phys_to_pte_val(phys)

#define __pgd_to_phys(pgd)	__pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys)	__phys_to_pte_val(phys)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_nx(prot) \
	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)

#define pgprot_decrypted(prot) \
	__pgprot_modify(prot, PROT_NS_SHARED, PROT_NS_SHARED)
#define pgprot_encrypted(prot) \
	__pgprot_modify(prot, PROT_NS_SHARED, 0)

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define pgprot_tagged(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
#define pgprot_mhp	pgprot_tagged
/*
 * DMA allocations for non-coherent devices use what the Arm architecture calls
 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
 * and merging of writes. This is different from "Device-nGnR[nE]" memory which
 * is intended for MMIO and thus forbids speculation, preserves access size,
 * requires strict alignment and can also force write responses to come from
 * the endpoint.
 */
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)
#define pmd_leaf(pmd)		(pmd_present(pmd) && !pmd_table(pmd))
#define pmd_bad(pmd)		(!pmd_table(pmd))

#define pmd_leaf_size(pmd)	(pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte)	(pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	/*
	 * If pmd is present-invalid, pmd_table() won't detect it
	 * as a table, so force the valid bit for the comparison.
	 */
	return pmd_present(pmd) && !pmd_table(__pmd(pmd_val(pmd) | PTE_VALID));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

extern pgd_t swapper_pg_dir[];
extern pgd_t idmap_pg_dir[];
extern pgd_t tramp_pg_dir[];
extern pgd_t reserved_pg_dir[];

extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

static inline bool in_swapper_pgdir(void *addr)
{
	return ((unsigned long)addr & PAGE_MASK) ==
		((unsigned long)swapper_pg_dir & PAGE_MASK);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
#ifdef __PAGETABLE_PMD_FOLDED
	if (in_swapper_pgdir(pmdp)) {
		set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
		return;
	}
#endif /* __PAGETABLE_PMD_FOLDED */

	WRITE_ONCE(*pmdp, pmd);

	if (pmd_valid(pmd))
		queue_pte_barriers();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return __pmd_to_phys(pmd);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_page_paddr(pmd));
}

/* Find an entry in the third-level page table. */
#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)			phys_to_page(__pmd_to_phys(pmd))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(e)	\
	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		((pud_val(pud) & PUD_TYPE_MASK) != \
				 PUD_TYPE_TABLE)
#define pud_present(pud)	pte_present(pud_pte(pud))
#ifndef __PAGETABLE_PMD_FOLDED
#define pud_leaf(pud)		(pud_present(pud) && !pud_table(pud))
#else
#define pud_leaf(pud)		false
#endif
#define pud_valid(pud)		pte_valid(pud_pte(pud))
#define pud_user(pud)		pte_user(pud_pte(pud))
#define pud_user_exec(pud)	pte_user_exec(pud_pte(pud))

static inline bool pgtable_l4_enabled(void);

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	if (!pgtable_l4_enabled() && in_swapper_pgdir(pudp)) {
		set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
		return;
	}

	WRITE_ONCE(*pudp, pud);

	if (pud_valid(pud))
		queue_pte_barriers();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return __pud_to_phys(pud);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_page_paddr(pud));
}

/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)			phys_to_page(__pud_to_phys(pud))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_valid(pud)		false
#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
#define pud_user_exec(pud)	pud_user(pud) /* Always 0 with folding */

/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

static __always_inline bool pgtable_l4_enabled(void)
{
	if (CONFIG_PGTABLE_LEVELS > 4 || !IS_ENABLED(CONFIG_ARM64_LPA2))
		return true;
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_pud_folded(const struct mm_struct *mm)
{
	return !pgtable_l4_enabled();
}
#define mm_pud_folded mm_pud_folded

#define pud_ERROR(e)	\
	pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_none(p4d)		(pgtable_l4_enabled() && !p4d_val(p4d))
#define p4d_bad(p4d)		(pgtable_l4_enabled() && \
				 ((p4d_val(p4d) & P4D_TYPE_MASK) != \
				  P4D_TYPE_TABLE))
#define p4d_present(p4d)	(!p4d_none(p4d))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (in_swapper_pgdir(p4dp)) {
		set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
		return;
	}

	WRITE_ONCE(*p4dp, p4d);
	queue_pte_barriers();
}

static inline void p4d_clear(p4d_t *p4dp)
{
	if (pgtable_l4_enabled())
		set_p4d(p4dp, __p4d(0));
}

static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
	return __p4d_to_phys(p4d);
}

#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

static inline pud_t *p4d_to_folded_pud(p4d_t *p4dp, unsigned long addr)
{
	/* Ensure that 'p4dp' indexes a page table according to 'addr' */
	VM_BUG_ON(((addr >> P4D_SHIFT) ^ ((u64)p4dp >> 3)) % PTRS_PER_P4D);

	return (pud_t *)PTR_ALIGN_DOWN(p4dp, PAGE_SIZE) + pud_index(addr);
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_page_paddr(p4d));
}

static inline phys_addr_t pud_offset_phys(p4d_t *p4dp, unsigned long addr)
{
	BUG_ON(!pgtable_l4_enabled());

	return p4d_page_paddr(READ_ONCE(*p4dp)) + pud_index(addr) * sizeof(pud_t);
}

static inline
pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return (pud_t *)__va(p4d_page_paddr(p4d)) + pud_index(addr);
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long addr)
{
	return pud_offset_lockless(p4dp, READ_ONCE(*p4dp), addr);
}
#define pud_offset pud_offset

static inline pud_t *pud_set_fixmap(unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return NULL;
	return (pud_t *)set_fixmap_offset(FIX_PUD, addr);
}

static inline pud_t *pud_set_fixmap_offset(p4d_t *p4dp, unsigned long addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return pud_set_fixmap(pud_offset_phys(p4dp, addr));
}

static inline void pud_clear_fixmap(void)
{
	if (pgtable_l4_enabled())
		clear_fixmap(FIX_PUD);
}

/* use ONLY for statically allocated translation tables */
static inline pud_t *pud_offset_kimg(p4d_t *p4dp, u64 addr)
{
	if (!pgtable_l4_enabled())
		return p4d_to_folded_pud(p4dp, addr);
	return (pud_t *)__phys_to_kimg(pud_offset_phys(p4dp, addr));
}

#define p4d_page(p4d)		pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))

#else

static inline bool pgtable_l4_enabled(void) { return false; }

#define p4d_page_paddr(p4d)	({ BUILD_BUG(); 0; })

/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#if CONFIG_PGTABLE_LEVELS > 4

static __always_inline bool pgtable_l5_enabled(void)
{
	if (!alternative_has_cap_likely(ARM64_ALWAYS_BOOT))
		return vabits_actual == VA_BITS;
	return alternative_has_cap_unlikely(ARM64_HAS_VA52);
}

static inline bool mm_p4d_folded(const struct mm_struct *mm)
{
	return !pgtable_l5_enabled();
}
#define mm_p4d_folded mm_p4d_folded

#define p4d_ERROR(e)	\
	pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e))

#define pgd_none(pgd)		(pgtable_l5_enabled() && !pgd_val(pgd))
#define pgd_bad(pgd)		(pgtable_l5_enabled() && \
				 ((pgd_val(pgd) & PGD_TYPE_MASK) != \
				  PGD_TYPE_TABLE))
#define pgd_present(pgd)	(!pgd_none(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (in_swapper_pgdir(pgdp)) {
		set_swapper_pgd(pgdp, __pgd(pgd_val(pgd)));
		return;
	}

	WRITE_ONCE(*pgdp, pgd);
	queue_pte_barriers();
}

static inline void pgd_clear(pgd_t *pgdp)
{
	if (pgtable_l5_enabled())
		set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return __pgd_to_phys(pgd);
}

#define p4d_index(addr)		(((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

static inline p4d_t *pgd_to_folded_p4d(pgd_t *pgdp, unsigned long addr)
{
	/* Ensure that 'pgdp' indexes a page table according to 'addr' */
	VM_BUG_ON(((addr >> PGDIR_SHIFT) ^ ((u64)pgdp >> 3)) % PTRS_PER_PGD);

	return (p4d_t *)PTR_ALIGN_DOWN(pgdp, PAGE_SIZE) + p4d_index(addr);
}

static inline phys_addr_t p4d_offset_phys(pgd_t *pgdp, unsigned long addr)
{
	BUG_ON(!pgtable_l5_enabled());

	return pgd_page_paddr(READ_ONCE(*pgdp)) + p4d_index(addr) * sizeof(p4d_t);
}

static inline
p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__va(pgd_page_paddr(pgd)) + p4d_index(addr);
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long addr)
{
	return p4d_offset_lockless(pgdp, READ_ONCE(*pgdp), addr);
}

static inline p4d_t *p4d_set_fixmap(unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return NULL;
	return (p4d_t *)set_fixmap_offset(FIX_P4D, addr);
}

static inline p4d_t *p4d_set_fixmap_offset(pgd_t *pgdp, unsigned long addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return p4d_set_fixmap(p4d_offset_phys(pgdp, addr));
}

static inline void p4d_clear_fixmap(void)
{
	if (pgtable_l5_enabled())
		clear_fixmap(FIX_P4D);
}

/* use ONLY for statically allocated translation tables */
static inline p4d_t *p4d_offset_kimg(pgd_t *pgdp, u64 addr)
{
	if (!pgtable_l5_enabled())
		return pgd_to_folded_p4d(pgdp, addr);
	return (p4d_t *)__phys_to_kimg(p4d_offset_phys(pgdp, addr));
}

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))

#else

static inline bool pgtable_l5_enabled(void) { return false; }

#define p4d_index(addr)		(((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

/* Match p4d_offset folding in <asm/generic/pgtable-nop4d.h> */
#define p4d_set_fixmap(addr)		NULL
#define p4d_set_fixmap_offset(p4dp, addr)	((p4d_t *)p4dp)
#define p4d_clear_fixmap()

#define p4d_offset_kimg(dir,addr)	((p4d_t *)dir)

static inline
p4d_t *p4d_offset_lockless_folded(pgd_t *pgdp, pgd_t pgd, unsigned long addr)
{
	/*
	 * With runtime folding of the pud, pud_offset_lockless() passes
	 * the 'pgd_t *' we return here to p4d_to_folded_pud(), which
	 * will offset the pointer assuming that it points into
	 * a page-table page. However, the fast GUP path passes us a
	 * pgd_t allocated on the stack and so we must use the original
	 * pointer in 'pgdp' to construct the p4d pointer instead of
	 * using the generic p4d_offset_lockless() implementation.
	 *
	 * Note: reusing the original pointer means that we may
	 * dereference the same (live) page-table entry multiple times.
	 * This is safe because it is still only loaded once in the
	 * context of each level and the CPU guarantees same-address
	 * read-after-read ordering.
	 */
	return p4d_offset(pgdp, addr);
}
#define p4d_offset_lockless p4d_offset_lockless_folded

#endif /* CONFIG_PGTABLE_LEVELS > 4 */

#define pgd_ERROR(e)	\
	pr_err("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/*
	 * Normal and Normal-Tagged are two different memory types and indices
	 * in MAIR_EL1. The mask below has to include PTE_ATTRINDX_MASK.
	 */
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PRESENT_INVALID | PTE_VALID | PTE_WRITE |
			      PTE_GP | PTE_ATTRINDX_MASK | PTE_PO_IDX_MASK;

	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	/*
	 * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware
	 * dirtiness again.
	 */
	if (pte_sw_dirty(pte))
		pte = pte_mkdirty(pte);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

extern int __ptep_set_access_flags(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep,
				   pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return __ptep_set_access_flags(vma, address, (pte_t *)pmdp,
				       pmd_pte(entry), dirty);
}
#endif

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_valid(pte) && (pte_user(pte) || pte_user_exec(pte));
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_valid(pmd) && !pmd_table(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_valid(pud) && !pud_table(pud) && (pud_user(pud) || pud_user_exec(pud));
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
static inline int __ptep_test_and_clear_young(struct vm_area_struct *vma,
					      unsigned long address,
					      pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = __ptep_get(ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int __ptep_clear_flush_young(struct vm_area_struct *vma,
					   unsigned long address, pte_t *ptep)
{
	int young = __ptep_test_and_clear_young(vma, address, ptep);

	if (young) {
		/*
		 * We can elide the trailing DSB here since the worst that can
		 * happen is that a CPU continues to use the young entry in its
		 * TLB and we mistakenly reclaim the associated page. The
		 * window for such an event is bounded by the next
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
		flush_tlb_page_nosync(vma, address);
	}

	return young;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	/* Operation applies to PMD table entry only if FEAT_HAFT is enabled */
	VM_WARN_ON(pmd_table(READ_ONCE(*pmdp)) && !system_supports_haft());
	return __ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */

static inline pte_t __ptep_get_and_clear_anysz(struct mm_struct *mm,
					       pte_t *ptep,
					       unsigned long pgsize)
{
	pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));

	switch (pgsize) {
	case PAGE_SIZE:
		page_table_check_pte_clear(mm, pte);
		break;
	case PMD_SIZE:
		page_table_check_pmd_clear(mm, pte_pmd(pte));
		break;
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		page_table_check_pud_clear(mm, pte_pud(pte));
		break;
#endif
	default:
		VM_WARN_ON(1);
	}

	return pte;
}

static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
					 unsigned long address, pte_t *ptep)
{
	return __ptep_get_and_clear_anysz(mm, ptep, PAGE_SIZE);
}

static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, unsigned int nr, int full)
{
	for (;;) {
		__ptep_get_and_clear(mm, addr, ptep);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}

static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep,
					      unsigned int nr, int full)
{
	pte_t pte, tmp_pte;

	pte = __ptep_get_and_clear(mm, addr, ptep);
	while (--nr) {
		ptep++;
		addr += PAGE_SIZE;
		tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
		if (pte_dirty(tmp_pte))
			pte = pte_mkdirty(pte);
		if (pte_young(tmp_pte))
			pte = pte_mkyoung(pte);
	}
	return pte;
}
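
/*
 * Note on the fold-up above: the returned pte carries the pfn and prot of
 * the *first* entry, with the access/dirty bits OR-ed in from every entry
 * in the range. A hypothetical caller tearing down a fully-mapped contpte
 * block might use it like this (sketch only):
 *
 *	pte = __get_and_clear_full_ptes(mm, addr, ptep, CONT_PTES, 1);
 *	if (pte_dirty(pte))
 *		folio_mark_dirty(folio);	// dirty anywhere in the range
 */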

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(__ptep_get_and_clear_anysz(mm, (pte_t *)pmdp, PMD_SIZE));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline void ___ptep_set_wrprotect(struct mm_struct *mm,
					 unsigned long address, pte_t *ptep,
					 pte_t pte)
{
	pte_t old_pte;

	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

/*
 * __ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
static inline void __ptep_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pte_t *ptep)
{
	___ptep_set_wrprotect(mm, address, ptep, __ptep_get(ptep));
}

static inline void __wrprotect_ptes(struct mm_struct *mm, unsigned long address,
				    pte_t *ptep, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++, address += PAGE_SIZE, ptep++)
		__ptep_set_wrprotect(mm, address, ptep);
}

static inline void __clear_young_dirty_pte(struct vm_area_struct *vma,
					   unsigned long addr, pte_t *ptep,
					   pte_t pte, cydp_t flags)
{
	pte_t old_pte;

	do {
		old_pte = pte;

		if (flags & CYDP_CLEAR_YOUNG)
			pte = pte_mkold(pte);
		if (flags & CYDP_CLEAR_DIRTY)
			pte = pte_mkclean(pte);

		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

static inline void __clear_young_dirty_ptes(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep,
					    unsigned int nr, cydp_t flags)
{
	pte_t pte;

	for (;;) {
		pte = __ptep_get(ptep);

		if (flags == (CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY))
			__set_pte(ptep, pte_mkclean(pte_mkold(pte)));
		else
			__clear_young_dirty_pte(vma, addr, ptep, pte, flags);

		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	__ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
	return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bit 2:		remember PG_anon_exclusive
 *	bit 3:		remember uffd-wp state
 *	bits 6-10:	swap type
 *	bit 11:		PTE_PRESENT_INVALID (must be zero)
 *	bits 12-61:	swap offset
 */
#define __SWP_TYPE_SHIFT	6
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_OFFSET_BITS	50
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
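
/*
 * Worked example of the encoding above (hypothetical type/offset): for swap
 * type 3 and offset 0x1234, __swp_entry(3, 0x1234) yields
 * (3 << 6) | (0x1234 << 12) = 0x12340c0. Both PTE_VALID (bit 0) and
 * PTE_PRESENT_INVALID (bit 11) remain zero, so the entry can never be
 * mistaken for a present pte, and __swp_type()/__swp_offset() recover
 * 3 and 0x1234 from it.
 */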
1548
1549 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
1550 #define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
1551
1552 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1553 #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
1554 #define __swp_entry_to_pmd(swp) __pmd((swp).val)
1555 #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
1556
1557 /*
1558 * Ensure that there are not more swap files than can be encoded in the kernel
1559 * PTEs.
1560 */
1561 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
1562
1563 #ifdef CONFIG_ARM64_MTE
1564
1565 #define __HAVE_ARCH_PREPARE_TO_SWAP
1566 extern int arch_prepare_to_swap(struct folio *folio);
1567
1568 #define __HAVE_ARCH_SWAP_INVALIDATE
arch_swap_invalidate_page(int type,pgoff_t offset)1569 static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
1570 {
1571 if (system_supports_mte())
1572 mte_invalidate_tags(type, offset);
1573 }
1574
arch_swap_invalidate_area(int type)1575 static inline void arch_swap_invalidate_area(int type)
1576 {
1577 if (system_supports_mte())
1578 mte_invalidate_tags_area(type);
1579 }
1580
1581 #define __HAVE_ARCH_SWAP_RESTORE
1582 extern void arch_swap_restore(swp_entry_t entry, struct folio *folio);
1583
1584 #endif /* CONFIG_ARM64_MTE */
1585
1586 /*
1587 * On AArch64, the cache coherency is handled via the __set_ptes() function.
1588 */
update_mmu_cache_range(struct vm_fault * vmf,struct vm_area_struct * vma,unsigned long addr,pte_t * ptep,unsigned int nr)1589 static inline void update_mmu_cache_range(struct vm_fault *vmf,
1590 struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
1591 unsigned int nr)
1592 {
1593 /*
1594 * We don't do anything here, so there's a very small chance of
1595 * us retaking a user fault which we just fixed up. The alternative
1596 * is doing a dsb(ishst), but that penalises the fastpath.
1597 */
1598 }
1599
1600 #define update_mmu_cache(vma, addr, ptep) \
1601 update_mmu_cache_range(NULL, vma, addr, ptep, 1)
1602 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
1603
1604 #ifdef CONFIG_ARM64_PA_BITS_52
1605 #define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
1606 #else
1607 #define phys_to_ttbr(addr) (addr)
1608 #endif
1609
1610 /*
1611 * On arm64 without hardware Access Flag, copying from user will fail because
1612 * the pte is old and cannot be marked young. So we always end up with zeroed
1613 * page after fork() + CoW for pfn mappings. We don't always have a
1614 * hardware-managed access flag on arm64.
1615 */
1616 #define arch_has_hw_pte_young cpu_has_hw_af

#ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG
#define arch_has_hw_nonleaf_pmd_young	system_supports_haft
#endif

/*
 * Experimentally, it's cheap to set the access flag in hardware and we
 * benefit from prefaulting mappings as 'old' to start with.
 */
#define arch_wants_old_prefaulted_pte	cpu_has_hw_af

/*
 * Request that exec memory is read into the pagecache in at least 64K
 * folios. This size can be contpte-mapped when 4K base pages are in use
 * (16 pages into 1 iTLB entry), and HPA can coalesce it (4 pages into 1
 * TLB entry) when 16K base pages are in use.
 */
#define exec_folio_order()	ilog2(SZ_64K >> PAGE_SHIFT)
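
/*
 * Worked example (illustrative): with 4K base pages, PAGE_SHIFT == 12,
 * so SZ_64K >> PAGE_SHIFT == 16 and exec_folio_order() == ilog2(16) == 4,
 * i.e. order-4 (64K) folios, which are exactly contpte-sized. With 16K
 * base pages the same expression yields order 2 (4 pages), matching the
 * HPA coalescing window described above.
 */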

static inline bool pud_sect_supported(void)
{
	return PAGE_SIZE == SZ_4K;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
#define ptep_modify_prot_start ptep_modify_prot_start
extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep);

#define ptep_modify_prot_commit ptep_modify_prot_commit
extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t new_pte);

#define modify_prot_start_ptes modify_prot_start_ptes
extern pte_t modify_prot_start_ptes(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    unsigned int nr);

#define modify_prot_commit_ptes modify_prot_commit_ptes
extern void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
				    pte_t *ptep, pte_t old_pte, pte_t pte,
				    unsigned int nr);

#ifdef CONFIG_ARM64_CONTPTE

/*
 * The contpte APIs are used to transparently manage the contiguous bit in ptes
 * where it is possible and makes sense to do so. The PTE_CONT bit is considered
 * a private implementation detail of the public ptep API (see below).
 */
extern void __contpte_try_fold(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte);
extern void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte);
extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte);
extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep);
extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, unsigned int nr);
extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr, int full);
extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, int full);
extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr);
extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t entry, int dirty);
extern void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, cydp_t flags);

static __always_inline void contpte_try_fold(struct mm_struct *mm,
					unsigned long addr, pte_t *ptep, pte_t pte)
{
	/*
	 * Only bother trying if both the virtual and physical addresses are
	 * aligned and correspond to the last entry in a contig range. The core
	 * code mostly modifies ranges from low to high, so this is likely the
	 * last modification in the contig range, and hence a good time to
	 * fold. We can't fold special mappings, because there is no associated
	 * folio.
	 */

	const unsigned long contmask = CONT_PTES - 1;
	bool valign = ((addr >> PAGE_SHIFT) & contmask) == contmask;

	if (unlikely(valign)) {
		bool palign = (pte_pfn(pte) & contmask) == contmask;

		if (unlikely(palign &&
		    pte_valid(pte) && !pte_cont(pte) && !pte_special(pte)))
			__contpte_try_fold(mm, addr, ptep, pte);
	}
}
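
/*
 * Illustrative example (hypothetical values, 4K pages): with CONT_PTES
 * == 16, contmask == 0xf. For addr == 0x7f000000f000, the index within
 * the contig range is (addr >> PAGE_SHIFT) & 0xf == 0xf, i.e. the last
 * entry, so valign is true. Folding is then attempted only if the pfn
 * is equally aligned (pfn & 0xf == 0xf) and the pte is valid, not
 * already contiguous and not special.
 */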

static __always_inline void contpte_try_unfold(struct mm_struct *mm,
					unsigned long addr, pte_t *ptep, pte_t pte)
{
	if (unlikely(pte_valid_cont(pte)))
		__contpte_try_unfold(mm, addr, ptep, pte);
}

#define pte_batch_hint pte_batch_hint
static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
{
	if (!pte_valid_cont(pte))
		return 1;

	return CONT_PTES - (((unsigned long)ptep >> 3) & (CONT_PTES - 1));
}
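
/*
 * Worked example (illustrative): pte entries are 8 bytes, so
 * ((unsigned long)ptep >> 3) & (CONT_PTES - 1) is the entry's index
 * within its contig block. With CONT_PTES == 16 and an index of 13, the
 * hint is 16 - 13 == 3: this entry and the next two share the same
 * contiguous mapping, so a caller may batch over them.
 */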

/*
 * The below functions constitute the public API that arm64 presents to the
 * core-mm to manipulate PTE entries within their page tables (or at least this
 * is the subset of the API that arm64 needs to implement). These public
 * versions will automatically and transparently apply the contiguous bit where
 * it makes sense to do so. Therefore any users that are contig-aware (e.g.
 * hugetlb, kernel mapper) should NOT use these APIs, but instead use the
 * private versions, which are prefixed with double underscore. All of these
 * APIs except for ptep_get_lockless() are expected to be called with the PTL
 * held. Although the contiguous bit is considered private to the
 * implementation, it is deliberately allowed to leak through the getters (e.g.
 * ptep_get()), back to core code. This is required so that pte_leaf_size() can
 * provide an accurate size for perf_get_pgtable_size(). But this leakage means
 * it's possible a pte will be passed to a setter with the contiguous bit set,
 * so we explicitly clear the contiguous bit in those cases to prevent
 * accidentally setting it in the pgtable.
 */
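
/*
 * Usage sketch (illustrative only, not taken from any real caller): a
 * contig-aware user manipulates entries with the private API and deals
 * with PTE_CONT itself:
 *
 *	pte = __ptep_get(ptep);			// PTE_CONT may be visible
 *	__set_ptes(mm, addr, ptep, pte, CONT_PTES);
 *
 * whereas generic core-mm code calls ptep_get()/set_ptes() below and
 * never needs to know whether PTE_CONT is in use.
 */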

#define ptep_get ptep_get
static inline pte_t ptep_get(pte_t *ptep)
{
	pte_t pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(pte)))
		return pte;

	return contpte_ptep_get(ptep, pte);
}

#define ptep_get_lockless ptep_get_lockless
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	pte_t pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(pte)))
		return pte;

	return contpte_ptep_get_lockless(ptep);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	/*
	 * We don't have the mm or vaddr so cannot unfold contig entries (since
	 * it requires tlb maintenance). set_pte() is not used in core code, so
	 * this should never even be called. Regardless, do our best to service
	 * any call and emit a warning if there is any attempt to set a pte on
	 * top of an existing contig range.
	 */
	pte_t orig_pte = __ptep_get(ptep);

	WARN_ON_ONCE(pte_valid_cont(orig_pte));
	__set_pte(ptep, pte_mknoncont(pte));
}

#define set_ptes set_ptes
static __always_inline void set_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, unsigned int nr)
{
	pte = pte_mknoncont(pte);

	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		__set_ptes(mm, addr, ptep, pte, 1);
		contpte_try_fold(mm, addr, ptep, pte);
	} else {
		contpte_set_ptes(mm, addr, ptep, pte, nr);
	}
}
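
/*
 * Flow sketch (illustrative): a single-entry store such as
 *
 *	set_ptes(mm, addr, ptep, pte, 1);
 *
 * first unfolds any contig range currently covering ptep, writes the
 * new entry, then opportunistically re-folds if this store completed an
 * aligned block of CONT_PTES entries. Multi-entry batches go straight
 * to contpte_set_ptes(), which performs any folding and unfolding
 * internally.
 */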

static inline void pte_clear(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
	__pte_clear(mm, addr, ptep);
}

#define clear_full_ptes clear_full_ptes
static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, unsigned int nr, int full)
{
	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		__clear_full_ptes(mm, addr, ptep, nr, full);
	} else {
		contpte_clear_full_ptes(mm, addr, ptep, nr, full);
	}
}

#define get_and_clear_full_ptes get_and_clear_full_ptes
static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, int full)
{
	pte_t pte;

	if (likely(nr == 1)) {
		contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
		pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full);
	} else {
		pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full);
	}

	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep));
	return __ptep_get_and_clear(mm, addr, ptep);
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	pte_t orig_pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_test_and_clear_young(vma, addr, ptep);

	return contpte_ptep_test_and_clear_young(vma, addr, ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	pte_t orig_pte = __ptep_get(ptep);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_clear_flush_young(vma, addr, ptep);

	return contpte_ptep_clear_flush_young(vma, addr, ptep);
}

#define wrprotect_ptes wrprotect_ptes
static __always_inline void wrprotect_ptes(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep, unsigned int nr)
{
	if (likely(nr == 1)) {
		/*
		 * Optimization: wrprotect_ptes() can only be called for present
		 * ptes, so we only need to check the contig bit as the condition
		 * for unfold, and we can remove the contig bit from the pte we
		 * read to avoid re-reading. This speeds up fork(), which is
		 * performance-sensitive for order-0 folios. Equivalent to
		 * contpte_try_unfold().
		 */
		pte_t orig_pte = __ptep_get(ptep);

		if (unlikely(pte_cont(orig_pte))) {
			__contpte_try_unfold(mm, addr, ptep, orig_pte);
			orig_pte = pte_mknoncont(orig_pte);
		}
		___ptep_set_wrprotect(mm, addr, ptep, orig_pte);
	} else {
		contpte_wrprotect_ptes(mm, addr, ptep, nr);
	}
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	wrprotect_ptes(mm, addr, ptep, 1);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t entry, int dirty)
{
	pte_t orig_pte = __ptep_get(ptep);

	entry = pte_mknoncont(entry);

	if (likely(!pte_valid_cont(orig_pte)))
		return __ptep_set_access_flags(vma, addr, ptep, entry, dirty);

	return contpte_ptep_set_access_flags(vma, addr, ptep, entry, dirty);
}

#define clear_young_dirty_ptes clear_young_dirty_ptes
static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep,
					  unsigned int nr, cydp_t flags)
{
	if (likely(nr == 1 && !pte_cont(__ptep_get(ptep))))
		__clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
	else
		contpte_clear_young_dirty_ptes(vma, addr, ptep, nr, flags);
}

#else /* CONFIG_ARM64_CONTPTE */

#define ptep_get				__ptep_get
#define set_pte					__set_pte
#define set_ptes				__set_ptes
#define pte_clear				__pte_clear
#define clear_full_ptes				__clear_full_ptes
#define get_and_clear_full_ptes			__get_and_clear_full_ptes
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear			__ptep_get_and_clear
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young		__ptep_test_and_clear_young
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young			__ptep_clear_flush_young
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect			__ptep_set_wrprotect
#define wrprotect_ptes				__wrprotect_ptes
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags			__ptep_set_access_flags
#define clear_young_dirty_ptes			__clear_young_dirty_ptes

#endif /* CONFIG_ARM64_CONTPTE */

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */