1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_PGTABLE_H
3 #define _LINUX_PGTABLE_H
4
5 #include <linux/pfn.h>
6 #include <asm/pgtable.h>
7
8 #define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT)
9 #define PUD_ORDER (PUD_SHIFT - PAGE_SHIFT)
10
11 #ifndef __ASSEMBLY__
12 #ifdef CONFIG_MMU
13
14 #include <linux/mm_types.h>
15 #include <linux/bug.h>
16 #include <linux/errno.h>
17 #include <asm-generic/pgtable_uffd.h>
18 #include <linux/page_table_check.h>
19
20 #if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
21 defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
22 #error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
23 #endif
24
25 /*
26 * On almost all architectures and configurations, 0 can be used as the
27 * upper ceiling to free_pgtables(): on many architectures it has the same
28 * effect as using TASK_SIZE. However, there is one configuration which
29 * must impose a more careful limit, to avoid freeing kernel pgtables.
30 */
31 #ifndef USER_PGTABLES_CEILING
32 #define USER_PGTABLES_CEILING 0UL
33 #endif
34
35 /*
36 * This defines the first usable user address. Platforms
37 * can override its value with custom FIRST_USER_ADDRESS
38 * defined in their respective <asm/pgtable.h>.
39 */
40 #ifndef FIRST_USER_ADDRESS
41 #define FIRST_USER_ADDRESS 0UL
42 #endif
43
/*
 * This defines the generic helper for accessing the PMD page
 * table page. Platforms can still override this via their
 * respective <asm/pgtable.h>.
 */
49 #ifndef pmd_pgtable
50 #define pmd_pgtable(pmd) pmd_page(pmd)
51 #endif
52
53 #define pmd_folio(pmd) page_folio(pmd_page(pmd))
54
/*
 * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
 *
 * The pXx_index() functions return the index of the entry in the page
 * table page which would control the given virtual address.
 *
 * As these functions may be used by the same code for different levels of
 * the page table folding, they are always available, regardless of the
 * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
 * because in such cases PTRS_PER_PxD equals 1.
 */
66
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
71
#ifndef pmd_index
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
#define pmd_index pmd_index
#endif
79
#ifndef pud_index
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
#define pud_index pud_index
#endif
87
88 #ifndef pgd_index
89 /* Must be a compile-time constant, so implement it as a macro */
90 #define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
91 #endif
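
/*
 * Illustrative sketch (not part of the API): a hypothetical helper showing
 * how the per-level index functions above relate to a virtual address.
 * Assumes a kernel context where pr_info() is available.
 *
 *	static void pgtable_print_indices(unsigned long address)
 *	{
 *		pr_info("pgd=%lu pud=%lu pmd=%lu pte=%lu\n",
 *			pgd_index(address), pud_index(address),
 *			pmd_index(address), pte_index(address));
 *	}
 */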
92
#ifndef kernel_pte_init
static inline void kernel_pte_init(void *addr)
{
}
#define kernel_pte_init kernel_pte_init
#endif
99
#ifndef pmd_init
static inline void pmd_init(void *addr)
{
}
#define pmd_init pmd_init
#endif
106
#ifndef pud_init
static inline void pud_init(void *addr)
{
}
#define pud_init pud_init
#endif
113
#ifndef pte_offset_kernel
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
#define pte_offset_kernel pte_offset_kernel
#endif
121
122 #ifdef CONFIG_HIGHPTE
123 #define __pte_map(pmd, address) \
124 ((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address)))
125 #define pte_unmap(pte) do { \
126 kunmap_local((pte)); \
127 rcu_read_unlock(); \
128 } while (0)
#else
static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}
static inline void pte_unmap(pte_t *pte)
{
	rcu_read_unlock();
}
#endif
139
140 void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
141
/* Find an entry in the second-level page table. */
#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return pud_pgtable(*pud) + pmd_index(address);
}
#define pmd_offset pmd_offset
#endif
150
#ifndef pud_offset
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return p4d_pgtable(*p4d) + pud_index(address);
}
#define pud_offset pud_offset
#endif
158
static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
{
	return (pgd + pgd_index(address));
}
163
164 /*
165 * a shortcut to get a pgd_t in a given mm
166 */
167 #ifndef pgd_offset
168 #define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
169 #endif
170
171 /*
172 * a shortcut which implies the use of the kernel's pgd, instead
173 * of a process's
174 */
175 #define pgd_offset_k(address) pgd_offset(&init_mm, (address))
176
/*
 * In many cases it is known that a virtual address is mapped at PMD or PTE
 * level, so instead of traversing all the page table levels, we can get a
 * pointer to the PMD entry in a user or kernel page table, or translate a
 * virtual address to a pointer to its PTE in the kernel page tables, with
 * these simple helpers.
 */
static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
}
188
static inline pmd_t *pmd_off_k(unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
}
193
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	pmd_t *pmd = pmd_off_k(vaddr);

	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}
200
#ifndef pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return 0;
}
#endif
207
#ifndef pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return 0;
}
#endif
214
/*
 * A facility to provide lazy MMU batching. This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued. Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window. Note that using this
 * interface requires that read hazards be removed from the code. A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.
 *
 * In the general case, no lock is guaranteed to be held between entry and exit
 * of the lazy mode. So the implementation must assume preemption may be enabled
 * and cpu migration is possible; it must take steps to be robust against this.
 * (In practice, for user PTE updates, the appropriate page table lock(s) are
 * held, but for kernel PTE updates, no lock is held). Nesting is not permitted
 * and the mode cannot be used in interrupt context.
 */
234 #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
235 #define arch_enter_lazy_mmu_mode() do {} while (0)
236 #define arch_leave_lazy_mmu_mode() do {} while (0)
237 #define arch_flush_lazy_mmu_mode() do {} while (0)
238 #endif
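
/*
 * Illustrative sketch of the expected usage pattern (hypothetical caller,
 * not taken from this file): a run of kernel PTE updates is bracketed by
 * enter/leave so the architecture may coalesce the resulting work. The
 * names "ptes" and "nr" are assumptions for the sketch.
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (i = 0; i < nr; i++, addr += PAGE_SIZE, ptep++)
 *		set_pte_at(&init_mm, addr, ptep, ptes[i]);
 *	arch_leave_lazy_mmu_mode();
 *
 * Rereading the PTEs through raw pointers inside the window would be a
 * read hazard as described above.
 */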
239
240 #ifndef pte_batch_hint
241 /**
242 * pte_batch_hint - Number of pages that can be added to batch without scanning.
243 * @ptep: Page table pointer for the entry.
244 * @pte: Page table entry.
245 *
246 * Some architectures know that a set of contiguous ptes all map the same
247 * contiguous memory with the same permissions. In this case, it can provide a
248 * hint to aid pte batching without the core code needing to scan every pte.
249 *
250 * An architecture implementation may ignore the PTE accessed state. Further,
251 * the dirty state must apply atomically to all the PTEs described by the hint.
252 *
253 * May be overridden by the architecture, else pte_batch_hint is always 1.
254 */
static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
{
	return 1;
}
#endif
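
/*
 * Illustrative sketch (assumption, not core-mm code): a batching scan can
 * use pte_batch_hint() to jump over a known-contiguous run instead of
 * inspecting every entry. "nr" and "max_nr" are hypothetical names.
 *
 *	while (nr < max_nr) {
 *		unsigned int hint;
 *
 *		if (!pte_same(ptep_get(ptep + nr), pte))
 *			break;
 *		hint = pte_batch_hint(ptep + nr, pte);
 *		nr += hint;
 *		pte = pte_advance_pfn(pte, hint);
 *	}
 */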
260
#ifndef pte_advance_pfn
static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
{
	return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT));
}
#endif
267
268 #define pte_next_pfn(pte) pte_advance_pfn(pte, 1)
269
270 #ifndef set_ptes
271 /**
272 * set_ptes - Map consecutive pages to a contiguous range of addresses.
273 * @mm: Address space to map the pages into.
274 * @addr: Address to map the first page at.
275 * @ptep: Page table pointer for the first entry.
276 * @pte: Page table entry for the first page.
277 * @nr: Number of pages to map.
278 *
279 * When nr==1, initial state of pte may be present or not present, and new state
280 * may be present or not present. When nr>1, initial state of all ptes must be
281 * not present, and new state must be present.
282 *
283 * May be overridden by the architecture, or the architecture can define
284 * set_pte() and PFN_PTE_SHIFT.
285 *
286 * Context: The caller holds the page table lock. The pages all belong
287 * to the same folio. The PTEs are all in the same PMD.
288 */
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pte, nr);

	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = pte_next_pfn(pte);
	}
}
302 #endif
303 #define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
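
/*
 * Illustrative sketch (hypothetical caller, not part of this file): mapping
 * a small folio at @addr with set_ptes(), per the requirements documented
 * above (page table lock held, all entries previously not present).
 *
 *	pte_t pte = mk_pte(&folio->page, vma->vm_page_prot);
 *
 *	set_ptes(vma->vm_mm, addr, ptep, pte, folio_nr_pages(folio));
 */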
304
305 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
306 extern int ptep_set_access_flags(struct vm_area_struct *vma,
307 unsigned long address, pte_t *ptep,
308 pte_t entry, int dirty);
309 #endif
310
311 #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
312 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
313 extern int pmdp_set_access_flags(struct vm_area_struct *vma,
314 unsigned long address, pmd_t *pmdp,
315 pmd_t entry, int dirty);
316 extern int pudp_set_access_flags(struct vm_area_struct *vma,
317 unsigned long address, pud_t *pudp,
318 pud_t entry, int dirty);
#else
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
static inline int pudp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp,
					pud_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
335 #endif
336
#ifndef ptep_get
static inline pte_t ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}
#endif

#ifndef pmdp_get
static inline pmd_t pmdp_get(pmd_t *pmdp)
{
	return READ_ONCE(*pmdp);
}
#endif

#ifndef pudp_get
static inline pud_t pudp_get(pud_t *pudp)
{
	return READ_ONCE(*pudp);
}
#endif

#ifndef p4dp_get
static inline p4d_t p4dp_get(p4d_t *p4dp)
{
	return READ_ONCE(*p4dp);
}
#endif

#ifndef pgdp_get
static inline pgd_t pgdp_get(pgd_t *pgdp)
{
	return READ_ONCE(*pgdp);
}
#endif
371
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);
	int r = 1;
	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif
386
#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;
	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
#endif
411
412 #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
413 int ptep_clear_flush_young(struct vm_area_struct *vma,
414 unsigned long address, pte_t *ptep);
415 #endif
416
417 #ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
418 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
419 extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
420 unsigned long address, pmd_t *pmdp);
#else
/*
 * Despite being relevant to THP only, this API is called from generic rmap
 * code under PageTransHuge(), hence it needs a dummy implementation for !THP.
 */
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
434
#ifndef arch_has_hw_nonleaf_pmd_young
/*
 * Return whether the accessed bit in non-leaf PMD entries is supported on the
 * local CPU.
 */
static inline bool arch_has_hw_nonleaf_pmd_young(void)
{
	return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG);
}
#endif
445
#ifndef arch_has_hw_pte_young
/*
 * Return whether the accessed bit is supported on the local CPU.
 *
 * This stub assumes accessing through an old PTE triggers a page fault.
 * Architectures that automatically set the access bit should override it.
 */
static inline bool arch_has_hw_pte_young(void)
{
	return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG);
}
#endif
458
#ifndef exec_folio_order
/*
 * Returns the preferred minimum folio order for executable file-backed memory.
 * Must be in the range [0, PMD_ORDER). Defaults to order-0.
 */
static inline unsigned int exec_folio_order(void)
{
	return 0;
}
#endif
469
#ifndef arch_check_zapped_pte
static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
					 pte_t pte)
{
}
#endif

#ifndef arch_check_zapped_pmd
static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
					 pmd_t pmd)
{
}
#endif

#ifndef arch_check_zapped_pud
static inline void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
{
}
#endif
489
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);
	pte_clear(mm, address, ptep);
	page_table_check_pte_clear(mm, pte);
	return pte;
}
#endif
501
#ifndef clear_young_dirty_ptes
/**
 * clear_young_dirty_ptes - Mark PTEs that map consecutive pages of the
 *		same folio as old/clean.
 * @vma: VMA the pages are mapped into.
 * @addr: Address the first page is mapped at.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to mark old/clean.
 * @flags: Flags to modify the PTE batch semantics.
 *
 * May be overridden by the architecture; otherwise, implemented by
 * get_and_clear/modify/set for each pte in the range.
 *
 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
 * some PTEs might be write-protected.
 *
 * Context: The caller holds the page table lock. The PTEs map consecutive
 * pages that belong to the same folio. The PTEs are all in the same PMD.
 */
static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep,
					  unsigned int nr, cydp_t flags)
{
	pte_t pte;

	for (;;) {
		if (flags == CYDP_CLEAR_YOUNG)
			ptep_test_and_clear_young(vma, addr, ptep);
		else {
			pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);
			if (flags & CYDP_CLEAR_YOUNG)
				pte = pte_mkold(pte);
			if (flags & CYDP_CLEAR_DIRTY)
				pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, addr, ptep, pte);
		}
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}
#endif
545
static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);

	pte_clear(mm, addr, ptep);
	/*
	 * No need for ptep_get_and_clear(): page table check doesn't care about
	 * any bits that could have been set by HW concurrently.
	 */
	page_table_check_pte_clear(mm, pte);
}
558
559 #ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
/*
 * For walking the pagetables without holding any locks. Some architectures
 * (eg x86-32 PAE) cannot load the entries atomically without using expensive
 * instructions. We are guaranteed that a PTE will only either go from not
 * present to present, or present to not present -- it will not switch to a
 * completely different present page without a TLB flush in between; which we
 * are blocking by holding interrupts off.
 *
 * Setting ptes from not present to present goes:
 *
 *   ptep->pte_high = h;
 *   smp_wmb();
 *   ptep->pte_low = l;
 *
 * And present to not present goes:
 *
 *   ptep->pte_low = 0;
 *   smp_wmb();
 *   ptep->pte_high = 0;
 *
 * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
 * We load pte_high *after* loading pte_low, which ensures we don't see an older
 * value of pte_high. *Then* we recheck pte_low, which ensures that we haven't
 * picked up a changed pte high. We might have gotten rubbish values from
 * pte_low and pte_high, but we are guaranteed that pte_low will not have the
 * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
 * operates on present ptes we're safe.
 */
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	pte_t pte;

	do {
		pte.pte_low = ptep->pte_low;
		smp_rmb();
		pte.pte_high = ptep->pte_high;
		smp_rmb();
	} while (unlikely(pte.pte_low != ptep->pte_low));

	return pte;
}
#define ptep_get_lockless ptep_get_lockless
602
#if CONFIG_PGTABLE_LEVELS > 2
static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
{
	pmd_t pmd;

	do {
		pmd.pmd_low = pmdp->pmd_low;
		smp_rmb();
		pmd.pmd_high = pmdp->pmd_high;
		smp_rmb();
	} while (unlikely(pmd.pmd_low != pmdp->pmd_low));

	return pmd;
}
#define pmdp_get_lockless pmdp_get_lockless
#define pmdp_get_lockless_sync() tlb_remove_table_sync_one()
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
620 #endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */
621
622 /*
623 * We require that the PTE can be read atomically.
624 */
#ifndef ptep_get_lockless
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	return ptep_get(ptep);
}
#endif

#ifndef pmdp_get_lockless
static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
{
	return pmdp_get(pmdp);
}
static inline void pmdp_get_lockless_sync(void)
{
}
#endif
641
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd_clear(pmdp);
	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}
#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pud_t *pudp)
{
	pud_t pud = *pudp;

	pud_clear(pudp);
	page_table_check_pud_clear(mm, pud);

	return pud;
}
#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
670
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp,
					    int full)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#endif

#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
static inline pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
					    unsigned long address, pud_t *pudp,
					    int full)
{
	return pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
690
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	return ptep_get_and_clear(mm, address, ptep);
}
#endif
699
700 #ifndef get_and_clear_full_ptes
701 /**
702 * get_and_clear_full_ptes - Clear present PTEs that map consecutive pages of
703 * the same folio, collecting dirty/accessed bits.
704 * @mm: Address space the pages are mapped into.
705 * @addr: Address the first page is mapped at.
706 * @ptep: Page table pointer for the first entry.
707 * @nr: Number of entries to clear.
708 * @full: Whether we are clearing a full mm.
709 *
710 * May be overridden by the architecture; otherwise, implemented as a simple
711 * loop over ptep_get_and_clear_full(), merging dirty/accessed bits into the
712 * returned PTE.
713 *
714 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
715 * some PTEs might be write-protected.
716 *
717 * Context: The caller holds the page table lock. The PTEs map consecutive
718 * pages that belong to the same folio. The PTEs are all in the same PMD.
719 */
static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep, unsigned int nr, int full)
{
	pte_t pte, tmp_pte;

	pte = ptep_get_and_clear_full(mm, addr, ptep, full);
	while (--nr) {
		ptep++;
		addr += PAGE_SIZE;
		tmp_pte = ptep_get_and_clear_full(mm, addr, ptep, full);
		if (pte_dirty(tmp_pte))
			pte = pte_mkdirty(pte);
		if (pte_young(tmp_pte))
			pte = pte_mkyoung(pte);
	}
	return pte;
}
737 #endif
738
739 /**
740 * get_and_clear_ptes - Clear present PTEs that map consecutive pages of
741 * the same folio, collecting dirty/accessed bits.
742 * @mm: Address space the pages are mapped into.
743 * @addr: Address the first page is mapped at.
744 * @ptep: Page table pointer for the first entry.
745 * @nr: Number of entries to clear.
746 *
747 * Use this instead of get_and_clear_full_ptes() if it is known that we don't
748 * need to clear the full mm, which is mostly the case.
749 *
750 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
751 * some PTEs might be write-protected.
752 *
753 * Context: The caller holds the page table lock. The PTEs map consecutive
754 * pages that belong to the same folio. The PTEs are all in the same PMD.
755 */
static inline pte_t get_and_clear_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, unsigned int nr)
{
	return get_and_clear_full_ptes(mm, addr, ptep, nr, 0);
}
761
762 #ifndef clear_full_ptes
763 /**
764 * clear_full_ptes - Clear present PTEs that map consecutive pages of the same
765 * folio.
766 * @mm: Address space the pages are mapped into.
767 * @addr: Address the first page is mapped at.
768 * @ptep: Page table pointer for the first entry.
769 * @nr: Number of entries to clear.
770 * @full: Whether we are clearing a full mm.
771 *
772 * May be overridden by the architecture; otherwise, implemented as a simple
773 * loop over ptep_get_and_clear_full().
774 *
775 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
776 * some PTEs might be write-protected.
777 *
778 * Context: The caller holds the page table lock. The PTEs map consecutive
779 * pages that belong to the same folio. The PTEs are all in the same PMD.
780 */
static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, unsigned int nr, int full)
{
	for (;;) {
		ptep_get_and_clear_full(mm, addr, ptep, full);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}
792 #endif
793
794 /**
795 * clear_ptes - Clear present PTEs that map consecutive pages of the same folio.
796 * @mm: Address space the pages are mapped into.
797 * @addr: Address the first page is mapped at.
798 * @ptep: Page table pointer for the first entry.
799 * @nr: Number of entries to clear.
800 *
801 * Use this instead of clear_full_ptes() if it is known that we don't need to
802 * clear the full mm, which is mostly the case.
803 *
804 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
805 * some PTEs might be write-protected.
806 *
807 * Context: The caller holds the page table lock. The PTEs map consecutive
808 * pages that belong to the same folio. The PTEs are all in the same PMD.
809 */
static inline void clear_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, unsigned int nr)
{
	clear_full_ptes(mm, addr, ptep, nr, 0);
}
815
/*
 * If two threads concurrently fault at the same page, the thread that
 * won the race updates the PTE and its local TLB/Cache. The other thread
 * gives up, simply does nothing, and continues; on architectures where
 * software can update the TLB, the local TLB can be updated here to avoid
 * the next page fault. This function updates the TLB only; it does nothing
 * with the cache or anything else. That is the difference from
 * update_mmu_cache().
 */
#ifndef update_mmu_tlb_range
static inline void update_mmu_tlb_range(struct vm_area_struct *vma,
				unsigned long address, pte_t *ptep, unsigned int nr)
{
}
#endif

static inline void update_mmu_tlb(struct vm_area_struct *vma,
				unsigned long address, pte_t *ptep)
{
	update_mmu_tlb_range(vma, address, ptep, 1);
}
836
/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif
851
852 #ifndef clear_not_present_full_ptes
853 /**
854 * clear_not_present_full_ptes - Clear multiple not present PTEs which are
855 * consecutive in the pgtable.
856 * @mm: Address space the ptes represent.
857 * @addr: Address of the first pte.
858 * @ptep: Page table pointer for the first entry.
859 * @nr: Number of entries to clear.
860 * @full: Whether we are clearing a full mm.
861 *
862 * May be overridden by the architecture; otherwise, implemented as a simple
863 * loop over pte_clear_not_present_full().
864 *
865 * Context: The caller holds the page table lock. The PTEs are all not present.
866 * The PTEs are all in the same PMD.
867 */
static inline void clear_not_present_full_ptes(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep, unsigned int nr, int full)
{
	for (;;) {
		pte_clear_not_present_full(mm, addr, ptep, full);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}
879 #endif
880
881 #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
882 extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
883 unsigned long address,
884 pte_t *ptep);
885 #endif
886
887 #ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
888 extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
889 unsigned long address,
890 pmd_t *pmdp);
891 extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
892 unsigned long address,
893 pud_t *pudp);
894 #endif
895
#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	return pte_mkwrite_novma(pte);
}
#endif

#if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite)
static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	return pmd_mkwrite_novma(pmd);
}
#endif
909
#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = ptep_get(ptep);
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif
918
919 #ifndef wrprotect_ptes
920 /**
921 * wrprotect_ptes - Write-protect PTEs that map consecutive pages of the same
922 * folio.
923 * @mm: Address space the pages are mapped into.
924 * @addr: Address the first page is mapped at.
925 * @ptep: Page table pointer for the first entry.
926 * @nr: Number of entries to write-protect.
927 *
928 * May be overridden by the architecture; otherwise, implemented as a simple
929 * loop over ptep_set_wrprotect().
930 *
931 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
932 * some PTEs might be write-protected.
933 *
934 * Context: The caller holds the page table lock. The PTEs map consecutive
935 * pages that belong to the same folio. The PTEs are all in the same PMD.
936 */
static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, unsigned int nr)
{
	for (;;) {
		ptep_set_wrprotect(mm, addr, ptep);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}
948 #endif
949
/*
 * On some architectures the hardware does not set the page access bit when
 * accessing a memory page; it is the responsibility of software to set this
 * bit, which incurs an extra page-fault penalty for tracking page accesses.
 * As an optimization, the access bit can be set during the page fault path
 * on these arches. To differentiate it from the pte_mkyoung() macro, this
 * macro is used on platforms where software maintains the page access bit.
 */
#ifndef pte_sw_mkyoung
static inline pte_t pte_sw_mkyoung(pte_t pte)
{
	return pte;
}
#define pte_sw_mkyoung pte_sw_mkyoung
#endif
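
/*
 * Illustrative sketch (hypothetical fault-path snippet, not taken from this
 * file): on software-managed access-bit arches the fault handler can mark
 * the new PTE young up front to avoid an immediate follow-up fault.
 *
 *	entry = mk_pte(page, vma->vm_page_prot);
 *	entry = pte_sw_mkyoung(entry);
 *	set_pte_at(vma->vm_mm, addr, ptep, entry);
 */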
965
#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pudp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pud_t *pudp)
{
	pud_t old_pud = *pudp;

	set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
}
#else
static inline void pudp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pud_t *pudp)
{
	BUILD_BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#endif
1001
#ifndef pmdp_collapse_flush
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#else
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	BUILD_BUG();
	return *pmdp;
}
#define pmdp_collapse_flush pmdp_collapse_flush
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
1017
1018 #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
1019 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1020 pgtable_t pgtable);
1021 #endif
1022
1023 #ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
1024 extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
1025 #endif
1026
1027 #ifndef arch_needs_pgtable_deposit
1028 #define arch_needs_pgtable_deposit() (false)
1029 #endif
1030
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is an implementation of pmdp_establish() that is only suitable for an
 * architecture that doesn't have hardware dirty/accessed bits. In this case we
 * can't race with a CPU setting these bits, so a non-atomic approach is fine.
 */
static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	return old_pmd;
}
#endif
1045
1046 #ifndef __HAVE_ARCH_PMDP_INVALIDATE
1047 extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
1048 pmd_t *pmdp);
1049 #endif
1050
1051 #ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
1052
/*
 * pmdp_invalidate_ad() invalidates the PMD while changing a transparent
 * hugepage mapping in the page tables. This function is similar to
 * pmdp_invalidate(), but should only be used if the access and dirty bits
 * would not be cleared by the software in the new PMD value. The function
 * ensures that hardware updates of the access and dirty bits are not lost.
 *
 * Doing so can, on certain architectures, avoid a TLB flush in most cases.
 * Another TLB flush might still be necessary later if the PMD update itself
 * requires one (e.g., if protection was made stricter). Even when a TLB
 * flush is needed because of the update, the caller may be able to batch
 * these TLB flushing operations, so fewer TLB flush operations are needed.
 */
1067 extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
1068 unsigned long address, pmd_t *pmdp);
1069 #endif
1070
#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PTE_UNUSED
/*
 * Some architectures provide facilities to virtualization guests
 * so that they can flag allocated pages as unused. This allows the
 * host to transparently reclaim unused pages. This function returns
 * whether the pte's page is unused.
 */
static inline int pte_unused(pte_t pte)
{
	return 0;
}
#endif
1090
1091 #ifndef pte_access_permitted
1092 #define pte_access_permitted(pte, write) \
1093 (pte_present(pte) && (!(write) || pte_write(pte)))
1094 #endif
1095
1096 #ifndef pmd_access_permitted
1097 #define pmd_access_permitted(pmd, write) \
1098 (pmd_present(pmd) && (!(write) || pmd_write(pmd)))
1099 #endif
1100
1101 #ifndef pud_access_permitted
1102 #define pud_access_permitted(pud, write) \
1103 (pud_present(pud) && (!(write) || pud_write(pud)))
1104 #endif
1105
1106 #ifndef p4d_access_permitted
1107 #define p4d_access_permitted(p4d, write) \
1108 (p4d_present(p4d) && (!(write) || p4d_write(p4d)))
1109 #endif
1110
1111 #ifndef pgd_access_permitted
1112 #define pgd_access_permitted(pgd, write) \
1113 (pgd_present(pgd) && (!(write) || pgd_write(pgd)))
1114 #endif
1115
#ifndef __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}
#endif

#ifndef pud_same
static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
	return pud_val(pud_a) == pud_val(pud_b);
}
#define pud_same pud_same
#endif

#ifndef __HAVE_ARCH_P4D_SAME
static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
{
	return p4d_val(p4d_a) == p4d_val(p4d_b);
}
#endif

#ifndef __HAVE_ARCH_PGD_SAME
static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
{
	return pgd_val(pgd_a) == pgd_val(pgd_b);
}
#endif
1144
#ifndef __HAVE_ARCH_DO_SWAP_PAGE
static inline void arch_do_swap_page_nr(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long addr,
				     pte_t pte, pte_t oldpte,
				     int nr)
{
}
#else
/*
 * Some architectures support metadata associated with a page. When a
 * page is being swapped out, this metadata must be saved so it can be
 * restored when the page is swapped back in. SPARC M7 and newer
 * processors support an ADI (Application Data Integrity) tag for the
 * page as metadata for the page. arch_do_swap_page() can restore this
 * metadata when a page is swapped back in.
 */
static inline void arch_do_swap_page_nr(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long addr,
				     pte_t pte, pte_t oldpte,
				     int nr)
{
	for (int i = 0; i < nr; i++) {
		arch_do_swap_page(vma->vm_mm, vma, addr + i * PAGE_SIZE,
				pte_advance_pfn(pte, i),
				pte_advance_pfn(oldpte, i));
	}
}
#endif
1176
1177 #ifndef __HAVE_ARCH_UNMAP_ONE
1178 /*
1179 * Some architectures support metadata associated with a page. When a
1180 * page is being swapped out, this metadata must be saved so it can be
1181 * restored when the page is swapped back in. SPARC M7 and newer
1182 * processors support an ADI (Application Data Integrity) tag for the
1183 * page as metadata for the page. arch_unmap_one() can save this
1184 * metadata on a swap-out of a page.
1185 */
static inline int arch_unmap_one(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long addr,
				 pte_t orig_pte)
{
	return 0;
}
1193 #endif
1194
1195 /*
1196 * Allow architectures to preserve additional metadata associated with
1197 * swapped-out pages. The corresponding __HAVE_ARCH_SWAP_* macros and function
1198 * prototypes must be defined in the arch-specific asm/pgtable.h file.
1199 */
#ifndef __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct folio *folio)
{
	return 0;
}
#endif

#ifndef __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
}

static inline void arch_swap_invalidate_area(int type)
{
}
#endif

#ifndef __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
}
#endif
1222
1223 #ifndef __HAVE_ARCH_MOVE_PTE
1224 #define move_pte(pte, old_addr, new_addr) (pte)
1225 #endif
1226
1227 #ifndef pte_accessible
1228 # define pte_accessible(mm, pte) ((void)(pte), 1)
1229 #endif
1230
1231 #ifndef flush_tlb_fix_spurious_fault
1232 #define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
1233 #endif
1234
1235 /*
1236 * When walking page tables, get the address of the next boundary,
1237 * or the end address of the range if that comes earlier. Although no
1238 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
1239 */
1240
1241 #define pgd_addr_end(addr, end) \
1242 ({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
1243 (__boundary - 1 < (end) - 1)? __boundary: (end); \
1244 })
1245
1246 #ifndef p4d_addr_end
1247 #define p4d_addr_end(addr, end) \
1248 ({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \
1249 (__boundary - 1 < (end) - 1)? __boundary: (end); \
1250 })
1251 #endif
1252
1253 #ifndef pud_addr_end
1254 #define pud_addr_end(addr, end) \
1255 ({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
1256 (__boundary - 1 < (end) - 1)? __boundary: (end); \
1257 })
1258 #endif
1259
1260 #ifndef pmd_addr_end
1261 #define pmd_addr_end(addr, end) \
1262 ({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
1263 (__boundary - 1 < (end) - 1)? __boundary: (end); \
1264 })
1265 #endif
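
/*
 * Illustrative sketch (hypothetical walker, not part of this file): these
 * helpers are typically consumed by a clamped loop over one level, here the
 * PMD level within a single PUD entry.
 *
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		... operate on the range [addr, next) ...
 *	} while (pmd++, addr = next, addr != end);
 */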
1266
1267 /*
1268 * When walking page tables, we usually want to skip any p?d_none entries;
1269 * and any p?d_bad entries - reporting the error before resetting to none.
1270 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
1271 */
1272 void pgd_clear_bad(pgd_t *);
1273
1274 #ifndef __PAGETABLE_P4D_FOLDED
1275 void p4d_clear_bad(p4d_t *);
1276 #else
1277 #define p4d_clear_bad(p4d) do { } while (0)
1278 #endif
1279
#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *);
#else
#define pud_clear_bad(pud) do { } while (0)
#endif
1285
1286 void pmd_clear_bad(pmd_t *);
1287
static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int p4d_none_or_clear_bad(p4d_t *p4d)
{
	if (p4d_none(*p4d))
		return 1;
	if (unlikely(p4d_bad(*p4d))) {
		p4d_clear_bad(p4d);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
1331
static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}
1354
1355 #ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1356 /*
1357 * Start a pte protection read-modify-write transaction, which
1358 * protects against asynchronous hardware modifications to the pte.
1359 * The intention is not to prevent the hardware from making pte
1360 * updates, but to prevent any updates it may make from being lost.
1361 *
1362 * This does not protect against other software modifications of the
1363 * pte; the appropriate pte lock must be held over the transaction.
1364 *
1365 * Note that this interface is intended to be batchable, meaning that
1366 * ptep_modify_prot_commit may not actually update the pte, but merely
1367 * queue the update to be done at some later time. The update must be
1368 * actually committed before the pte lock is released, however.
1369 */
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(vma, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified. The pte returned from ptep_modify_prot_start() may
 * additionally have young and/or dirty bits set where previously they were
 * not, so the updated pte may have these additional changes.
 */
static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
					   unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	__ptep_modify_prot_commit(vma, addr, ptep, pte);
}
1389 #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
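
/*
 * Illustrative sketch (hypothetical mprotect-style caller, not taken from
 * this file): the start/commit pair brackets the read-modify-write of the
 * protection bits so that concurrent hardware A/D updates are not lost.
 *
 *	old_pte = ptep_modify_prot_start(vma, addr, ptep);
 *	new_pte = pte_modify(old_pte, newprot);
 *	ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 *
 * The page table lock must be held across the whole transaction.
 */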
1390
1391 /**
1392 * modify_prot_start_ptes - Start a pte protection read-modify-write transaction
1393 * over a batch of ptes, which protects against asynchronous hardware
1394 * modifications to the ptes. The intention is not to prevent the hardware from
1395 * making pte updates, but to prevent any updates it may make from being lost.
1396 * Please see the comment above ptep_modify_prot_start() for full description.
1397 *
1398 * @vma: The virtual memory area the pages are mapped into.
1399 * @addr: Address the first page is mapped at.
1400 * @ptep: Page table pointer for the first entry.
1401 * @nr: Number of entries.
1402 *
1403 * May be overridden by the architecture; otherwise, implemented as a simple
1404 * loop over ptep_modify_prot_start(), collecting the a/d bits from each pte
1405 * in the batch.
1406 *
1407 * Note that PTE bits in the PTE batch besides the PFN can differ.
1408 *
1409 * Context: The caller holds the page table lock. The PTEs map consecutive
1410 * pages that belong to the same folio. All other PTE bits must be identical for
1411 * all PTEs in the batch except for young and dirty bits. The PTEs are all in
1412 * the same PMD.
1413 */
1414 #ifndef modify_prot_start_ptes
static inline pte_t modify_prot_start_ptes(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep, unsigned int nr)
{
	pte_t pte, tmp_pte;

	pte = ptep_modify_prot_start(vma, addr, ptep);
	while (--nr) {
		ptep++;
		addr += PAGE_SIZE;
		tmp_pte = ptep_modify_prot_start(vma, addr, ptep);
		if (pte_dirty(tmp_pte))
			pte = pte_mkdirty(pte);
		if (pte_young(tmp_pte))
			pte = pte_mkyoung(pte);
	}
	return pte;
}
1432 #endif
1433
1434 /**
1435 * modify_prot_commit_ptes - Commit an update to a batch of ptes, leaving any
1436 * hardware-controlled bits in the PTE unmodified.
1437 *
1438 * @vma: The virtual memory area the pages are mapped into.
1439 * @addr: Address the first page is mapped at.
1440 * @ptep: Page table pointer for the first entry.
1441 * @old_pte: Old page table entry (for the first entry) which is now cleared.
1442 * @pte: New page table entry to be set.
1443 * @nr: Number of entries.
1444 *
1445 * May be overridden by the architecture; otherwise, implemented as a simple
1446 * loop over ptep_modify_prot_commit().
1447 *
1448 * Context: The caller holds the page table lock. The PTEs are all in the same
1449 * PMD. On exit, the set ptes in the batch map the same folio. The ptes set by
1450 * ptep_modify_prot_start() may additionally have young and/or dirty bits set
1451 * where previously they were not, so the updated ptes may have these
1452 * additional changes.
1453 */
1454 #ifndef modify_prot_commit_ptes
static inline void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
		pte_t *ptep, pte_t old_pte, pte_t pte, unsigned int nr)
{
	int i;

	for (i = 0; i < nr; ++i, ++ptep, addr += PAGE_SIZE) {
		ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);

		/* Advance PFN only, set same prot */
		old_pte = pte_next_pfn(old_pte);
		pte = pte_next_pfn(pte);
	}
}
1468 #endif
1469
1470 /*
1471 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
1472 * and let generic vmalloc, ioremap and page table update code know when
1473 * arch_sync_kernel_mappings() needs to be called.
1474 */
1475 #ifndef ARCH_PAGE_TABLE_SYNC_MASK
1476 #define ARCH_PAGE_TABLE_SYNC_MASK 0
1477 #endif
1478
/*
 * There is no default implementation for arch_sync_kernel_mappings(). We rely
 * on the compiler to optimize the calls out if ARCH_PAGE_TABLE_SYNC_MASK
 * is 0.
 */
1484 void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
1485
1486 #endif /* CONFIG_MMU */
1487
1488 /*
1489 * No-op macros that just return the current protection value. Defined here
1490 * because these macros can be used even if CONFIG_MMU is not defined.
1491 */
1492
1493 #ifndef pgprot_nx
1494 #define pgprot_nx(prot) (prot)
1495 #endif
1496
1497 #ifndef pgprot_noncached
1498 #define pgprot_noncached(prot) (prot)
1499 #endif
1500
1501 #ifndef pgprot_writecombine
1502 #define pgprot_writecombine pgprot_noncached
1503 #endif
1504
1505 #ifndef pgprot_writethrough
1506 #define pgprot_writethrough pgprot_noncached
1507 #endif
1508
1509 #ifndef pgprot_device
1510 #define pgprot_device pgprot_noncached
1511 #endif
1512
1513 #ifndef pgprot_mhp
1514 #define pgprot_mhp(prot) (prot)
1515 #endif
1516
#ifdef CONFIG_MMU
#ifndef pgprot_modify
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
		newprot = pgprot_noncached(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
		newprot = pgprot_writecombine(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
		newprot = pgprot_device(newprot);
	return newprot;
}
#endif
#endif /* CONFIG_MMU */
1532
1533 #ifndef pgprot_encrypted
1534 #define pgprot_encrypted(prot) (prot)
1535 #endif
1536
1537 #ifndef pgprot_decrypted
1538 #define pgprot_decrypted(prot) (prot)
1539 #endif
1540
1541 /*
1542 * A facility to provide batching of the reload of page tables and
1543 * other process state with the actual context switch code for
1544 * paravirtualized guests. By convention, only one of the batched
1545 * update (lazy) modes (CPU, MMU) should be active at any given time,
1546 * entry should never be nested, and entry and exits should always be
1547 * paired. This is for sanity of maintaining and reasoning about the
1548 * kernel code. In this case, the exit (end of the context switch) is
1549 * in architecture-specific code, and so doesn't need a generic
1550 * definition.
1551 */
1552 #ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
1553 #define arch_start_context_switch(prev) do {} while (0)
1554 #endif
1555
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
#endif
#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
static inline int pte_soft_dirty(pte_t pte)
{
	return 0;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return 0;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
#endif
1634
#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interfaces that can be used by architecture code to keep track of
 * the memory type of pfn mappings specified by remap_pfn_range() and
 * vmf_insert_pfn().
 */

static inline int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
                pgprot_t *prot)
{
        return 0;
}

static inline int pfnmap_track(unsigned long pfn, unsigned long size,
                pgprot_t *prot)
{
        return 0;
}

static inline void pfnmap_untrack(unsigned long pfn, unsigned long size)
{
}
#else
/**
 * pfnmap_setup_cachemode - setup the cachemode in the pgprot for a pfn range
 * @pfn: the start of the pfn range
 * @size: the size of the pfn range in bytes
 * @prot: the pgprot to modify
 *
 * Look up the cachemode for the pfn range starting at @pfn with the size
 * @size and store it in @prot, leaving other data in @prot unchanged.
 *
 * This allows for a hardware implementation to have fine-grained control of
 * memory cache behavior at page level granularity. Without a hardware
 * implementation, this function does nothing.
 *
 * Currently there is only one implementation for this - x86 Page Attribute
 * Table (PAT). See Documentation/arch/x86/pat.rst for more details.
 *
 * This function can fail if the pfn range spans pfns that require differing
 * cachemodes. If the pfn range was previously verified to have a single
 * cachemode, it is sufficient to query only a single pfn. The assumption is
 * that this is the case for drivers using the vmf_insert_pfn*() interface.
 *
 * Returns 0 on success and -EINVAL on error.
 */
int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
                pgprot_t *prot);

/**
 * pfnmap_track - track a pfn range
 * @pfn: the start of the pfn range
 * @size: the size of the pfn range in bytes
 * @prot: the pgprot to track
 *
 * Request the pfn range to be 'tracked' by a hardware implementation and
 * set up the cachemode in @prot similarly to pfnmap_setup_cachemode().
 *
 * This allows for fine-grained control of memory cache behaviour at page
 * level granularity. Tracking memory this way is persisted across VMA splits
 * (VMA merging does not apply for VM_PFNMAP).
 *
 * Currently, there is only one implementation for this - x86 Page Attribute
 * Table (PAT). See Documentation/arch/x86/pat.rst for more details.
 *
 * Returns 0 on success and -EINVAL on error.
 */
int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot);

/**
 * pfnmap_untrack - untrack a pfn range
 * @pfn: the start of the pfn range
 * @size: the size of the pfn range in bytes
 *
 * Untrack a pfn range previously tracked through pfnmap_track().
 */
void pfnmap_untrack(unsigned long pfn, unsigned long size);
#endif

/**
 * pfnmap_setup_cachemode_pfn - setup the cachemode in the pgprot for a pfn
 * @pfn: the pfn
 * @prot: the pgprot to modify
 *
 * Look up the cachemode for @pfn and store it in @prot, leaving other
 * data in @prot unchanged.
 *
 * See pfnmap_setup_cachemode() for details.
 */
static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
{
        pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot);
}
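/*
 * A minimal usage sketch, assuming a caller that installs a raw pfn mapping
 * itself; the mapping-installation step is elided and the error values are
 * illustrative only:
 *
 *      pgprot_t prot = vma->vm_page_prot;
 *
 *      if (pfnmap_track(pfn, size, &prot))
 *              return -EINVAL; // mixed or unsupported cachemodes in the range
 *      // ... install the mapping using the (possibly updated) prot ...
 *      // ... and later, exactly once per successful pfnmap_track():
 *      pfnmap_untrack(pfn, size);
 */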

#ifdef CONFIG_MMU
#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
        extern unsigned long zero_pfn;
        unsigned long offset_from_zero_pfn = pfn - zero_pfn;
        return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))

#else
static inline int is_zero_pfn(unsigned long pfn)
{
        extern unsigned long zero_pfn;
        return pfn == zero_pfn;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
        extern unsigned long zero_pfn;
        return zero_pfn;
}
#endif
#else
static inline int is_zero_pfn(unsigned long pfn)
{
        return 0;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
        return 0;
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_MMU

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
        return 0;
}
#ifndef pmd_write
static inline int pmd_write(pmd_t pmd)
{
        BUG();
        return 0;
}
#endif /* pmd_write */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
        BUG();
        return 0;
}
#endif /* pud_write */

#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
        !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline int pud_trans_huge(pud_t pud)
{
        return 0;
}
#endif

static inline int pud_trans_unstable(pud_t *pud)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
        defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
        pud_t pudval = READ_ONCE(*pud);

        if (pud_none(pudval) || pud_trans_huge(pudval))
                return 1;
        if (unlikely(pud_bad(pudval))) {
                pud_clear_bad(pud);
                return 1;
        }
#endif
        return 0;
}

#ifndef CONFIG_NUMA_BALANCING
/*
 * In an inaccessible (PROT_NONE) VMA, pte_protnone() may indicate "yes". It is
 * perfectly valid to indicate "no" in that case, which is why the default
 * implementation below always returns "no".
 *
 * In an accessible VMA, however, pte_protnone() reliably indicates PROT_NONE
 * page protection due to NUMA hinting. NUMA hinting faults only apply in
 * accessible VMAs.
 *
 * So, to reliably identify PROT_NONE PTEs that require a NUMA hinting fault,
 * looking at the VMA accessibility is sufficient.
 */
static inline int pte_protnone(pte_t pte)
{
        return 0;
}

static inline int pmd_protnone(pmd_t pmd)
{
        return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
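/*
 * A minimal sketch of the check described above, assuming a fault-handling
 * context that already holds the PTE and its VMA:
 *
 *      pte_t pte = ptep_get(ptep);
 *
 *      if (pte_protnone(pte) && vma_is_accessible(vma)) {
 *              // PROT_NONE solely due to NUMA hinting: handle as a hinting fault
 *      } else if (pte_protnone(pte)) {
 *              // the VMA itself is inaccessible (PROT_NONE): a real protection fault
 *      }
 */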

#endif /* CONFIG_MMU */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP

#ifndef __PAGETABLE_P4D_FOLDED
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
void p4d_clear_huge(p4d_t *p4d);
#else
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
        return 0;
}
static inline void p4d_clear_huge(p4d_t *p4d) { }
#endif /* !__PAGETABLE_P4D_FOLDED */

int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
int pud_free_pmd_page(pud_t *pud, unsigned long addr);
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
        return 0;
}
static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
        return 0;
}
static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
        return 0;
}
static inline void p4d_clear_huge(p4d_t *p4d) { }
static inline int pud_clear_huge(pud_t *pud)
{
        return 0;
}
static inline int pmd_clear_huge(pmd_t *pmd)
{
        return 0;
}
static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
        return 0;
}
static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
        return 0;
}
static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
        return 0;
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Architectures with special requirements for evicting THP backing TLB
 * entries can implement this. It can also help optimize the normal TLB
 * flush in the THP regime: a stock flush_tlb_range() typically has an
 * optimization to nuke the entire TLB once the flush span exceeds a
 * threshold, which will likely be true for a single huge page. A single
 * THP flush would then invalidate the entire TLB, which is not desirable.
 * e.g. see arch/arc: flush_pmd_tlb_range
 */
#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#else
#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG()
#endif
#endif
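/*
 * A minimal sketch of why the dedicated flush helps, assuming generic THP
 * code that has just modified one PMD entry; the surrounding locking and
 * the new PMD value are elided:
 *
 *      addr &= HPAGE_PMD_MASK;
 *      set_pmd_at(mm, addr, pmdp, newpmd);
 *      // flush exactly one huge-page span instead of letting a stock
 *      // flush_tlb_range() heuristic nuke the whole TLB
 *      flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
 */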

struct file;
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                unsigned long size, pgprot_t *vma_prot);

#ifndef CONFIG_X86_ESPFIX64
static inline void init_espfix_bsp(void) { }
#endif

extern void __init pgtable_cache_init(void);

#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
        return true;
}

static inline bool arch_has_pfn_modify_check(void)
{
        return false;
}
#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */

/*
 * Architecture PAGE_KERNEL_* fallbacks
 *
 * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
 * because they really don't support them, or the port needs to be updated to
 * reflect the required functionality. Below is a set of relatively safe,
 * best-effort fallbacks that can be relied upon until the architectures
 * define these flags themselves.
 */

#ifndef PAGE_KERNEL_RO
# define PAGE_KERNEL_RO PAGE_KERNEL
#endif

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif
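/*
 * A minimal sketch of relying on the fallbacks, assuming a caller that maps
 * pages read-only where possible; on architectures without a real
 * PAGE_KERNEL_RO this silently degrades to PAGE_KERNEL:
 *
 *      void *addr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL_RO);
 */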

/*
 * Page Table Modification bits for pgtbl_mod_mask.
 *
 * These are used by the p?d_alloc_track*() and p*d_populate_kernel()
 * functions in the generic vmalloc, ioremap and page table update code
 * to track at which page-table levels entries have been modified.
 * Based on that the code can better decide when page table changes need
 * to be synchronized to other page-tables in the system.
 */
#define __PGTBL_PGD_MODIFIED 0
#define __PGTBL_P4D_MODIFIED 1
#define __PGTBL_PUD_MODIFIED 2
#define __PGTBL_PMD_MODIFIED 3
#define __PGTBL_PTE_MODIFIED 4

#define PGTBL_PGD_MODIFIED BIT(__PGTBL_PGD_MODIFIED)
#define PGTBL_P4D_MODIFIED BIT(__PGTBL_P4D_MODIFIED)
#define PGTBL_PUD_MODIFIED BIT(__PGTBL_PUD_MODIFIED)
#define PGTBL_PMD_MODIFIED BIT(__PGTBL_PMD_MODIFIED)
#define PGTBL_PTE_MODIFIED BIT(__PGTBL_PTE_MODIFIED)

/* Page-Table Modification Mask */
typedef unsigned int pgtbl_mod_mask;
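/*
 * A minimal sketch of how the mask is meant to be accumulated, assuming a
 * kernel page-table populate path; the allocation itself is elided and the
 * sync step follows the ARCH_PAGE_TABLE_SYNC_MASK convention used by the
 * generic mapping code:
 *
 *      pgtbl_mod_mask mask = 0;
 *
 *      if (pmd_none(*pmd)) {
 *              // ... allocate and install a new PTE page ...
 *              mask |= PGTBL_PMD_MODIFIED;
 *      }
 *      // ...
 *      if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 *              arch_sync_kernel_mappings(start, end);
 */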

#endif /* !__ASSEMBLY__ */

#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
#ifdef CONFIG_PHYS_ADDR_T_64BIT
/*
 * ZSMALLOC needs to know the highest PFN on 32-bit architectures
 * with physical address space extension, but falls back to
 * BITS_PER_LONG otherwise.
 */
#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
#else
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
#endif
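/*
 * A minimal sketch of what a 32-bit architecture with extended physical
 * addressing would provide in its <asm/pgtable.h>; the value is illustrative
 * (36 bits corresponds to a PAE-style 64 GiB physical address space):
 *
 *      #define MAX_POSSIBLE_PHYSMEM_BITS 36
 */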

#ifndef has_transparent_hugepage
#define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
#endif

#ifndef has_transparent_pud_hugepage
#define has_transparent_pud_hugepage() IS_BUILTIN(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
#endif
/*
 * On some architectures, whether the p4d/pud or pmd layer of the page
 * table hierarchy is folded depends on the mm.
 */
#ifndef mm_p4d_folded
#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
#endif

#ifndef mm_pud_folded
#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
#endif

#ifndef mm_pmd_folded
#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
#endif
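/*
 * A minimal sketch in the spirit of the per-mm page-table accounting
 * helpers, assuming a caller that only accounts a PUD page when the PUD
 * level really exists for this mm:
 *
 *      if (mm_pud_folded(mm))
 *              return;
 *      atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
 */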

#ifndef p4d_offset_lockless
#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
#endif
#ifndef pud_offset_lockless
#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
#endif
#ifndef pmd_offset_lockless
#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
#endif

/*
 * pXd_leaf() is the API to check whether a pgtable entry is a huge page
 * mapping. It should work globally across all archs, without any
 * dependency on CONFIG_* options. For architectures that do not support
 * huge mappings on specific levels, the fallbacks below will be used.
 *
 * A leaf pgtable entry should always imply the following:
 *
 * - It is a "present" entry. IOW, before using this API, please check it
 *   with pXd_present() first. NOTE: it may not always mean the "present
 *   bit" is set. For example, PROT_NONE entries are always "present".
 *
 * - It should _never_ be a swap entry of any type. The above "present"
 *   check should have guarded this, but let's be crystal clear on this.
 *
 * - It should contain a huge PFN, which points to a huge page larger than
 *   PAGE_SIZE of the platform. The PFN format isn't important here.
 *
 * - It should cover all kinds of huge mappings (i.e. pXd_trans_huge()
 *   or hugetlb mappings).
 */
#ifndef pgd_leaf
#define pgd_leaf(x) false
#endif
#ifndef p4d_leaf
#define p4d_leaf(x) false
#endif
#ifndef pud_leaf
#define pud_leaf(x) false
#endif
#ifndef pmd_leaf
#define pmd_leaf(x) false
#endif
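/*
 * A minimal page-table-walker sketch, assuming @pudp points at a valid PUD
 * entry; note the pud_present() check is required before pud_leaf():
 *
 *      pud_t pud = READ_ONCE(*pudp);
 *
 *      if (!pud_present(pud))
 *              return;                 // none, swap or migration entry
 *      if (pud_leaf(pud)) {
 *              // one huge mapping covering pud_leaf_size(pud) bytes
 *      } else {
 *              // a lower-level table: descend to the PMD level
 *      }
 */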

#ifndef pgd_leaf_size
#define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT)
#endif
#ifndef p4d_leaf_size
#define p4d_leaf_size(x) P4D_SIZE
#endif
#ifndef pud_leaf_size
#define pud_leaf_size(x) PUD_SIZE
#endif
#ifndef pmd_leaf_size
#define pmd_leaf_size(x) PMD_SIZE
#endif
#ifndef __pte_leaf_size
#ifndef pte_leaf_size
#define pte_leaf_size(x) PAGE_SIZE
#endif
#define __pte_leaf_size(x,y) pte_leaf_size(y)
#endif

/*
 * We always define pmd_pfn for all archs as it's used in lots of generic
 * code. The same now applies to pud_pfn (and may apply to even larger
 * mappings in the future; we're not there yet). Instead of requiring every
 * arch to define it (like pmd_pfn), provide a fallback.
 *
 * Note that returning 0 here means any arch that didn't define this can
 * go badly wrong when it hits a real pud leaf. It's the arch's
 * responsibility to properly define it when a huge pud is possible.
 */
#ifndef pud_pfn
#define pud_pfn(x) 0
#endif
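/*
 * A minimal sketch of the kind of generic code that relies on pud_pfn(),
 * assuming a present huge PUD has already been detected; the offset math
 * yields the pfn of the small page containing @addr:
 *
 *      if (pud_present(pud) && pud_leaf(pud)) {
 *              unsigned long pfn = pud_pfn(pud) +
 *                                  ((addr & ~PUD_MASK) >> PAGE_SHIFT);
 *              // ...
 *      }
 */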

/*
 * Some architectures have MMUs that are configurable or selectable at boot
 * time. These lead to variable PTRS_PER_x. For statically allocated arrays it
 * helps to have a static maximum value.
 */

#ifndef MAX_PTRS_PER_PTE
#define MAX_PTRS_PER_PTE PTRS_PER_PTE
#endif

#ifndef MAX_PTRS_PER_PMD
#define MAX_PTRS_PER_PMD PTRS_PER_PMD
#endif

#ifndef MAX_PTRS_PER_PUD
#define MAX_PTRS_PER_PUD PTRS_PER_PUD
#endif

#ifndef MAX_PTRS_PER_P4D
#define MAX_PTRS_PER_P4D PTRS_PER_P4D
#endif
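/*
 * A minimal sketch of why the MAX_PTRS_PER_* constants exist, assuming a
 * statically allocated table that must fit the largest runtime MMU mode;
 * the array name is purely illustrative:
 *
 *      static pte_t scratch_pte_table[MAX_PTRS_PER_PTE] __page_aligned_bss;
 */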

#ifndef pte_pgprot
#define pte_pgprot(x) ((pgprot_t) {0})
#endif

#ifndef pmd_pgprot
#define pmd_pgprot(x) ((pgprot_t) {0})
#endif

#ifndef pud_pgprot
#define pud_pgprot(x) ((pgprot_t) {0})
#endif

/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware. The expected
 * behavior is in parens:
 *
 * map_type     prot
 *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
 * MAP_SHARED   r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 *
 * MAP_PRIVATE  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 *
 * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
 * MAP_PRIVATE (with Enhanced PAN supported):
 *                                                              r: (no) no
 *                                                              w: (no) no
 *                                                              x: (yes) yes
 */
#define DECLARE_VM_GET_PAGE_PROT \
pgprot_t vm_get_page_prot(vm_flags_t vm_flags) \
{ \
        return protection_map[vm_flags & \
                        (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \
} \
EXPORT_SYMBOL(vm_get_page_prot);
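/*
 * A minimal sketch of how an architecture is expected to use the macro,
 * assuming it defines a 16-entry protection_map in its mm code; the PAGE_*
 * names and the subset of indices shown are illustrative only:
 *
 *      static pgprot_t protection_map[16] __ro_after_init = {
 *              [VM_NONE]                               = PAGE_NONE,
 *              [VM_READ]                               = PAGE_READONLY,
 *              [VM_WRITE | VM_READ]                    = PAGE_COPY,
 *              [VM_SHARED | VM_WRITE | VM_READ]        = PAGE_SHARED,
 *              // ... remaining combinations ...
 *      };
 *      DECLARE_VM_GET_PAGE_PROT
 */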

#endif /* _LINUX_PGTABLE_H */