xref: /linux/include/linux/pgtable.h (revision 3dfde97800e06882960cc926d2c428f2128b7c70)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_PGTABLE_H
3 #define _LINUX_PGTABLE_H
4 
5 #include <linux/pfn.h>
6 #include <asm/pgtable.h>
7 
8 #define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
9 #define PUD_ORDER	(PUD_SHIFT - PAGE_SHIFT)
10 
11 #ifndef __ASSEMBLY__
12 #ifdef CONFIG_MMU
13 
14 #include <linux/mm_types.h>
15 #include <linux/bug.h>
16 #include <linux/errno.h>
17 #include <asm-generic/pgtable_uffd.h>
18 #include <linux/page_table_check.h>
19 
20 #if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
21 	defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
22 #error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
23 #endif
24 
25 /*
26  * On almost all architectures and configurations, 0 can be used as the
27  * upper ceiling to free_pgtables(): on many architectures it has the same
28  * effect as using TASK_SIZE.  However, there is one configuration which
29  * must impose a more careful limit, to avoid freeing kernel pgtables.
30  */
31 #ifndef USER_PGTABLES_CEILING
32 #define USER_PGTABLES_CEILING	0UL
33 #endif
34 
35 /*
36  * This defines the first usable user address. Platforms
37  * can override its value with a custom FIRST_USER_ADDRESS
38  * defined in their respective <asm/pgtable.h>.
39  */
40 #ifndef FIRST_USER_ADDRESS
41 #define FIRST_USER_ADDRESS	0UL
42 #endif
43 
44 /*
45  * This defines the generic helper for accessing the PMD page
46  * table page. Platforms can still override it via their
47  * respective <asm/pgtable.h>.
48  */
49 #ifndef pmd_pgtable
50 #define pmd_pgtable(pmd) pmd_page(pmd)
51 #endif
52 
53 #define pmd_folio(pmd) page_folio(pmd_page(pmd))
54 
55 /*
56  * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
57  *
58  * The pXx_index() functions return the index of the entry in the page
59  * table page which would control the given virtual address.
60  *
61  * As these functions may be used by the same code for different levels of
62  * page table folding, they are always available, regardless of the
63  * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
64  * because in such cases PTRS_PER_PxD equals 1.
65  */
66 
67 static inline unsigned long pte_index(unsigned long address)
68 {
69 	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
70 }
71 
72 #ifndef pmd_index
73 static inline unsigned long pmd_index(unsigned long address)
74 {
75 	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
76 }
77 #define pmd_index pmd_index
78 #endif
79 
80 #ifndef pud_index
81 static inline unsigned long pud_index(unsigned long address)
82 {
83 	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
84 }
85 #define pud_index pud_index
86 #endif
87 
88 #ifndef pgd_index
89 /* Must be a compile-time constant, so implement it as a macro */
90 #define pgd_index(a)  (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
91 #endif
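/*
 * Worked example (illustrative only): with 4K pages (PAGE_SHIFT == 12) and
 * PTRS_PER_PTE == 512, the address 0x7f1234567000 yields
 * pte_index() == (0x7f1234567000 >> 12) & 511 == 0x167, i.e. entry 359 of
 * the PTE page; the higher-level pXd_index() helpers slice the upper
 * address bits the same way using PMD_SHIFT/PUD_SHIFT/PGDIR_SHIFT.
 */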
92 
93 #ifndef kernel_pte_init
94 static inline void kernel_pte_init(void *addr)
95 {
96 }
97 #define kernel_pte_init kernel_pte_init
98 #endif
99 
100 #ifndef pmd_init
101 static inline void pmd_init(void *addr)
102 {
103 }
104 #define pmd_init pmd_init
105 #endif
106 
107 #ifndef pud_init
108 static inline void pud_init(void *addr)
109 {
110 }
111 #define pud_init pud_init
112 #endif
113 
114 #ifndef pte_offset_kernel
115 static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
116 {
117 	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
118 }
119 #define pte_offset_kernel pte_offset_kernel
120 #endif
121 
122 #ifdef CONFIG_HIGHPTE
123 #define __pte_map(pmd, address) \
124 	((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address)))
125 #define pte_unmap(pte)	do {	\
126 	kunmap_local((pte));	\
127 	rcu_read_unlock();	\
128 } while (0)
129 #else
130 static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address)
131 {
132 	return pte_offset_kernel(pmd, address);
133 }
134 static inline void pte_unmap(pte_t *pte)
135 {
136 	rcu_read_unlock();
137 }
138 #endif
139 
140 void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
141 
142 /* Find an entry in the second-level page table. */
143 #ifndef pmd_offset
144 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
145 {
146 	return pud_pgtable(*pud) + pmd_index(address);
147 }
148 #define pmd_offset pmd_offset
149 #endif
150 
151 #ifndef pud_offset
152 static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
153 {
154 	return p4d_pgtable(*p4d) + pud_index(address);
155 }
156 #define pud_offset pud_offset
157 #endif
158 
159 static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
160 {
161 	return (pgd + pgd_index(address));
162 }
163 
164 /*
165  * a shortcut to get a pgd_t in a given mm
166  */
167 #ifndef pgd_offset
168 #define pgd_offset(mm, address)		pgd_offset_pgd((mm)->pgd, (address))
169 #endif
170 
171 /*
172  * a shortcut which implies the use of the kernel's pgd, instead
173  * of a process's
174  */
175 #define pgd_offset_k(address)		pgd_offset(&init_mm, (address))
176 
177 /*
178  * In many cases it is known that a virtual address is mapped at PMD or PTE
179  * level, so instead of traversing all the page table levels, we can get a
180  * pointer to the PMD entry in a user or kernel page table, or translate a
181  * virtual address to a pointer to the PTE in the kernel page tables, with simple
182  * helpers.
183  */
184 static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
185 {
186 	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
187 }
188 
189 static inline pmd_t *pmd_off_k(unsigned long va)
190 {
191 	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
192 }
193 
194 static inline pte_t *virt_to_kpte(unsigned long vaddr)
195 {
196 	pmd_t *pmd = pmd_off_k(vaddr);
197 
198 	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
199 }
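/*
 * Example (illustrative sketch): resolving the kernel PTE for an already
 * mapped kernel virtual address; "kaddr" is a hypothetical variable here.
 *
 *	pte_t *ptep = virt_to_kpte(kaddr);
 *
 *	if (ptep && pte_present(ptep_get(ptep)))
 *		pr_debug("pfn=%lx\n", pte_pfn(ptep_get(ptep)));
 */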
200 
201 #ifndef pmd_young
202 static inline int pmd_young(pmd_t pmd)
203 {
204 	return 0;
205 }
206 #endif
207 
208 #ifndef pmd_dirty
209 static inline int pmd_dirty(pmd_t pmd)
210 {
211 	return 0;
212 }
213 #endif
214 
215 /*
216  * A facility to provide lazy MMU batching.  This allows PTE updates and
217  * page invalidations to be delayed until a call to leave lazy MMU mode
218  * is issued.  Some architectures may benefit from doing this, and it is
219  * beneficial for both shadow and direct mode hypervisors, which may batch
220  * the PTE updates which happen during this window.  Note that using this
221  * interface requires that read hazards be removed from the code.  A read
222  * hazard could result in the direct mode hypervisor case, since the actual
223  * write to the page tables may not yet have taken place, so reads through
224  * a raw PTE pointer after it has been modified are not guaranteed to be
225  * up to date.
226  *
227  * In the general case, no lock is guaranteed to be held between entry and exit
228  * of the lazy mode. So the implementation must assume preemption may be enabled
229  * and CPU migration is possible; it must take steps to be robust against this.
230  * (In practice, for user PTE updates, the appropriate page table lock(s) are
231  * held, but for kernel PTE updates, no lock is held). Nesting is not permitted
232  * and the mode cannot be used in interrupt context.
233  */
234 #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
235 #define arch_enter_lazy_mmu_mode()	do {} while (0)
236 #define arch_leave_lazy_mmu_mode()	do {} while (0)
237 #define arch_flush_lazy_mmu_mode()	do {} while (0)
238 #endif
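/*
 * Illustrative batching pattern (a sketch, assuming the caller already
 * holds the appropriate page table lock for user PTE updates, as noted
 * above):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (i = 0; i < nr; i++, ptep++, addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, ptep, pte_advance_pfn(pte, i));
 *	arch_leave_lazy_mmu_mode();
 *
 * Between enter and leave, do not re-read the ptes through raw pointers:
 * the writes may still be queued by the implementation.
 */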
239 
240 #ifndef pte_batch_hint
241 /**
242  * pte_batch_hint - Number of pages that can be added to batch without scanning.
243  * @ptep: Page table pointer for the entry.
244  * @pte: Page table entry.
245  *
246  * Some architectures know that a set of contiguous ptes all map the same
247  * contiguous memory with the same permissions. In this case, it can provide a
248  * hint to aid pte batching without the core code needing to scan every pte.
249  *
250  * An architecture implementation may ignore the PTE accessed state. Further,
251  * the dirty state must apply atomically to all the PTEs described by the hint.
252  *
253  * May be overridden by the architecture, else pte_batch_hint is always 1.
254  */
255 static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
256 {
257 	return 1;
258 }
259 #endif
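/*
 * Illustrative scan loop (a sketch): advance by the hint instead of one
 * entry at a time when the architecture can vouch for a whole batch.
 *
 *	while (nr) {
 *		unsigned int step = min(nr, pte_batch_hint(ptep, ptep_get(ptep)));
 *
 *		... operate on the batch [ptep, ptep + step) ...
 *		ptep += step;
 *		nr -= step;
 *	}
 */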
260 
261 #ifndef pte_advance_pfn
262 static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
263 {
264 	return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT));
265 }
266 #endif
267 
268 #define pte_next_pfn(pte) pte_advance_pfn(pte, 1)
269 
270 #ifndef set_ptes
271 /**
272  * set_ptes - Map consecutive pages to a contiguous range of addresses.
273  * @mm: Address space to map the pages into.
274  * @addr: Address to map the first page at.
275  * @ptep: Page table pointer for the first entry.
276  * @pte: Page table entry for the first page.
277  * @nr: Number of pages to map.
278  *
279  * When nr==1, the initial state of the pte may be present or not present, and
280  * the new state may be present or not present. When nr>1, the initial state of
281  * all ptes must be not present, and the new state must be present.
282  *
283  * May be overridden by the architecture, or the architecture can define
284  * set_pte() and PFN_PTE_SHIFT.
285  *
286  * Context: The caller holds the page table lock.  The pages all belong
287  * to the same folio.  The PTEs are all in the same PMD.
288  */
289 static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
290 		pte_t *ptep, pte_t pte, unsigned int nr)
291 {
292 	page_table_check_ptes_set(mm, ptep, pte, nr);
293 
294 	for (;;) {
295 		set_pte(ptep, pte);
296 		if (--nr == 0)
297 			break;
298 		ptep++;
299 		pte = pte_next_pfn(pte);
300 	}
301 }
302 #endif
303 #define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
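/*
 * Example (illustrative sketch): mapping nr pages of one folio in a single
 * call; "prot" and the surrounding locking are assumed to be set up by the
 * hypothetical caller, and all nr entries must previously be pte_none().
 *
 *	pte_t pte = mk_pte(&folio->page, prot);
 *
 *	set_ptes(vma->vm_mm, addr, ptep, pte, nr);
 */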
304 
305 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
306 extern int ptep_set_access_flags(struct vm_area_struct *vma,
307 				 unsigned long address, pte_t *ptep,
308 				 pte_t entry, int dirty);
309 #endif
310 
311 #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
312 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
313 extern int pmdp_set_access_flags(struct vm_area_struct *vma,
314 				 unsigned long address, pmd_t *pmdp,
315 				 pmd_t entry, int dirty);
316 extern int pudp_set_access_flags(struct vm_area_struct *vma,
317 				 unsigned long address, pud_t *pudp,
318 				 pud_t entry, int dirty);
319 #else
320 static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
321 					unsigned long address, pmd_t *pmdp,
322 					pmd_t entry, int dirty)
323 {
324 	BUILD_BUG();
325 	return 0;
326 }
327 static inline int pudp_set_access_flags(struct vm_area_struct *vma,
328 					unsigned long address, pud_t *pudp,
329 					pud_t entry, int dirty)
330 {
331 	BUILD_BUG();
332 	return 0;
333 }
334 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
335 #endif
336 
337 #ifndef ptep_get
338 static inline pte_t ptep_get(pte_t *ptep)
339 {
340 	return READ_ONCE(*ptep);
341 }
342 #endif
343 
344 #ifndef pmdp_get
345 static inline pmd_t pmdp_get(pmd_t *pmdp)
346 {
347 	return READ_ONCE(*pmdp);
348 }
349 #endif
350 
351 #ifndef pudp_get
352 static inline pud_t pudp_get(pud_t *pudp)
353 {
354 	return READ_ONCE(*pudp);
355 }
356 #endif
357 
358 #ifndef p4dp_get
359 static inline p4d_t p4dp_get(p4d_t *p4dp)
360 {
361 	return READ_ONCE(*p4dp);
362 }
363 #endif
364 
365 #ifndef pgdp_get
366 static inline pgd_t pgdp_get(pgd_t *pgdp)
367 {
368 	return READ_ONCE(*pgdp);
369 }
370 #endif
371 
372 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
373 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
374 					    unsigned long address,
375 					    pte_t *ptep)
376 {
377 	pte_t pte = ptep_get(ptep);
378 	int r = 1;
379 	if (!pte_young(pte))
380 		r = 0;
381 	else
382 		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
383 	return r;
384 }
385 #endif
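/*
 * Illustrative rmap-walk style usage (a sketch; "referenced" is a
 * hypothetical counter): harvest and clear the accessed bit so future
 * references can be detected again.
 *
 *	if (ptep_test_and_clear_young(vma, addr, ptep))
 *		referenced++;	// entry was accessed since the last scan
 */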
386 
387 #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
388 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
389 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
390 					    unsigned long address,
391 					    pmd_t *pmdp)
392 {
393 	pmd_t pmd = *pmdp;
394 	int r = 1;
395 	if (!pmd_young(pmd))
396 		r = 0;
397 	else
398 		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
399 	return r;
400 }
401 #else
402 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
403 					    unsigned long address,
404 					    pmd_t *pmdp)
405 {
406 	BUILD_BUG();
407 	return 0;
408 }
409 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
410 #endif
411 
412 #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
413 int ptep_clear_flush_young(struct vm_area_struct *vma,
414 			   unsigned long address, pte_t *ptep);
415 #endif
416 
417 #ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
418 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
419 extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
420 				  unsigned long address, pmd_t *pmdp);
421 #else
422 /*
423  * Despite being relevant to THP only, this API is called from generic rmap
424  * code under PageTransHuge(), hence it needs a dummy implementation for !THP.
425  */
426 static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
427 					 unsigned long address, pmd_t *pmdp)
428 {
429 	BUILD_BUG();
430 	return 0;
431 }
432 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
433 #endif
434 
435 #ifndef arch_has_hw_nonleaf_pmd_young
436 /*
437  * Return whether the accessed bit in non-leaf PMD entries is supported on the
438  * local CPU.
439  */
440 static inline bool arch_has_hw_nonleaf_pmd_young(void)
441 {
442 	return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG);
443 }
444 #endif
445 
446 #ifndef arch_has_hw_pte_young
447 /*
448  * Return whether the accessed bit is supported on the local CPU.
449  *
450  * This stub assumes accessing through an old PTE triggers a page fault.
451  * Architectures that automatically set the access bit should override it.
452  */
453 static inline bool arch_has_hw_pte_young(void)
454 {
455 	return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG);
456 }
457 #endif
458 
459 #ifndef exec_folio_order
460 /*
461  * Returns preferred minimum folio order for executable file-backed memory. Must
462  * be in range [0, PMD_ORDER). Defaults to order-0.
463  */
464 static inline unsigned int exec_folio_order(void)
465 {
466 	return 0;
467 }
468 #endif
469 
470 #ifndef arch_check_zapped_pte
471 static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
472 					 pte_t pte)
473 {
474 }
475 #endif
476 
477 #ifndef arch_check_zapped_pmd
478 static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
479 					 pmd_t pmd)
480 {
481 }
482 #endif
483 
484 #ifndef arch_check_zapped_pud
485 static inline void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
486 {
487 }
488 #endif
489 
490 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
491 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
492 				       unsigned long address,
493 				       pte_t *ptep)
494 {
495 	pte_t pte = ptep_get(ptep);
496 	pte_clear(mm, address, ptep);
497 	page_table_check_pte_clear(mm, pte);
498 	return pte;
499 }
500 #endif
501 
502 #ifndef clear_young_dirty_ptes
503 /**
504  * clear_young_dirty_ptes - Mark PTEs that map consecutive pages of the
505  *		same folio as old/clean.
506  * @vma: The virtual memory area the pages are mapped into.
507  * @addr: Address the first page is mapped at.
508  * @ptep: Page table pointer for the first entry.
509  * @nr: Number of entries to mark old/clean.
510  * @flags: Flags to modify the PTE batch semantics.
511  *
512  * May be overridden by the architecture; otherwise, implemented by
513  * get_and_clear/modify/set for each pte in the range.
514  *
515  * Note that PTE bits in the PTE range besides the PFN can differ. For example,
516  * some PTEs might be write-protected.
517  *
518  * Context: The caller holds the page table lock.  The PTEs map consecutive
519  * pages that belong to the same folio.  The PTEs are all in the same PMD.
520  */
521 static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
522 					  unsigned long addr, pte_t *ptep,
523 					  unsigned int nr, cydp_t flags)
524 {
525 	pte_t pte;
526 
527 	for (;;) {
528 		if (flags == CYDP_CLEAR_YOUNG)
529 			ptep_test_and_clear_young(vma, addr, ptep);
530 		else {
531 			pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);
532 			if (flags & CYDP_CLEAR_YOUNG)
533 				pte = pte_mkold(pte);
534 			if (flags & CYDP_CLEAR_DIRTY)
535 				pte = pte_mkclean(pte);
536 			set_pte_at(vma->vm_mm, addr, ptep, pte);
537 		}
538 		if (--nr == 0)
539 			break;
540 		ptep++;
541 		addr += PAGE_SIZE;
542 	}
543 }
544 #endif
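/*
 * Illustrative MADV_FREE-style usage (a sketch): age and clean a whole
 * batch of a folio's ptes in one call.
 *
 *	clear_young_dirty_ptes(vma, addr, ptep, nr,
 *			       CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY);
 */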
545 
546 static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
547 			      pte_t *ptep)
548 {
549 	pte_t pte = ptep_get(ptep);
550 
551 	pte_clear(mm, addr, ptep);
552 	/*
553 	 * No need for ptep_get_and_clear(): page table check doesn't care about
554 	 * any bits that could have been set by HW concurrently.
555 	 */
556 	page_table_check_pte_clear(mm, pte);
557 }
558 
559 #ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
560 /*
561  * For walking the pagetables without holding any locks.  Some architectures
562  * (eg x86-32 PAE) cannot load the entries atomically without using expensive
563  * instructions.  We are guaranteed that a PTE will only either go from not
564  * present to present, or present to not present -- it will not switch to a
565  * completely different present page without a TLB flush in between, which we
566  * are blocking by holding interrupts off.
567  *
568  * Setting ptes from not present to present goes:
569  *
570  *   ptep->pte_high = h;
571  *   smp_wmb();
572  *   ptep->pte_low = l;
573  *
574  * And present to not present goes:
575  *
576  *   ptep->pte_low = 0;
577  *   smp_wmb();
578  *   ptep->pte_high = 0;
579  *
580  * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
581  * We load pte_high *after* loading pte_low, which ensures we don't see an older
582  * value of pte_high.  *Then* we recheck pte_low, which ensures that we haven't
583  * picked up a changed pte high. We might have gotten rubbish values from
584  * pte_low and pte_high, but we are guaranteed that pte_low will not have the
585  * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
586  * operates on present ptes we're safe.
587  */
588 static inline pte_t ptep_get_lockless(pte_t *ptep)
589 {
590 	pte_t pte;
591 
592 	do {
593 		pte.pte_low = ptep->pte_low;
594 		smp_rmb();
595 		pte.pte_high = ptep->pte_high;
596 		smp_rmb();
597 	} while (unlikely(pte.pte_low != ptep->pte_low));
598 
599 	return pte;
600 }
601 #define ptep_get_lockless ptep_get_lockless
602 
603 #if CONFIG_PGTABLE_LEVELS > 2
604 static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
605 {
606 	pmd_t pmd;
607 
608 	do {
609 		pmd.pmd_low = pmdp->pmd_low;
610 		smp_rmb();
611 		pmd.pmd_high = pmdp->pmd_high;
612 		smp_rmb();
613 	} while (unlikely(pmd.pmd_low != pmdp->pmd_low));
614 
615 	return pmd;
616 }
617 #define pmdp_get_lockless pmdp_get_lockless
618 #define pmdp_get_lockless_sync() tlb_remove_table_sync_one()
619 #endif /* CONFIG_PGTABLE_LEVELS > 2 */
620 #endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */
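/*
 * Illustrative lockless reader (GUP-fast style sketch): the caller is
 * assumed to have interrupts disabled so the TLB-flush guarantee above
 * holds, making the retry loop in ptep_get_lockless() sufficient.
 *
 *	pte_t pte = ptep_get_lockless(ptep);
 *
 *	if (!pte_present(pte))
 *		return 0;	// fall back to the slow path
 */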
621 
622 /*
623  * We require that the PTE can be read atomically.
624  */
625 #ifndef ptep_get_lockless
626 static inline pte_t ptep_get_lockless(pte_t *ptep)
627 {
628 	return ptep_get(ptep);
629 }
630 #endif
631 
632 #ifndef pmdp_get_lockless
633 static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
634 {
635 	return pmdp_get(pmdp);
636 }
637 static inline void pmdp_get_lockless_sync(void)
638 {
639 }
640 #endif
641 
642 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
643 #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
644 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
645 					    unsigned long address,
646 					    pmd_t *pmdp)
647 {
648 	pmd_t pmd = *pmdp;
649 
650 	pmd_clear(pmdp);
651 	page_table_check_pmd_clear(mm, pmd);
652 
653 	return pmd;
654 }
655 #endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
656 #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
657 static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
658 					    unsigned long address,
659 					    pud_t *pudp)
660 {
661 	pud_t pud = *pudp;
662 
663 	pud_clear(pudp);
664 	page_table_check_pud_clear(mm, pud);
665 
666 	return pud;
667 }
668 #endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
669 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
670 
671 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
672 #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
673 static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
674 					    unsigned long address, pmd_t *pmdp,
675 					    int full)
676 {
677 	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
678 }
679 #endif
680 
681 #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
682 static inline pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
683 					    unsigned long address, pud_t *pudp,
684 					    int full)
685 {
686 	return pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
687 }
688 #endif
689 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
690 
691 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
692 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
693 					    unsigned long address, pte_t *ptep,
694 					    int full)
695 {
696 	return ptep_get_and_clear(mm, address, ptep);
697 }
698 #endif
699 
700 #ifndef get_and_clear_full_ptes
701 /**
702  * get_and_clear_full_ptes - Clear present PTEs that map consecutive pages of
703  *			     the same folio, collecting dirty/accessed bits.
704  * @mm: Address space the pages are mapped into.
705  * @addr: Address the first page is mapped at.
706  * @ptep: Page table pointer for the first entry.
707  * @nr: Number of entries to clear.
708  * @full: Whether we are clearing a full mm.
709  *
710  * May be overridden by the architecture; otherwise, implemented as a simple
711  * loop over ptep_get_and_clear_full(), merging dirty/accessed bits into the
712  * returned PTE.
713  *
714  * Note that PTE bits in the PTE range besides the PFN can differ. For example,
715  * some PTEs might be write-protected.
716  *
717  * Context: The caller holds the page table lock.  The PTEs map consecutive
718  * pages that belong to the same folio.  The PTEs are all in the same PMD.
719  */
720 static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
721 		unsigned long addr, pte_t *ptep, unsigned int nr, int full)
722 {
723 	pte_t pte, tmp_pte;
724 
725 	pte = ptep_get_and_clear_full(mm, addr, ptep, full);
726 	while (--nr) {
727 		ptep++;
728 		addr += PAGE_SIZE;
729 		tmp_pte = ptep_get_and_clear_full(mm, addr, ptep, full);
730 		if (pte_dirty(tmp_pte))
731 			pte = pte_mkdirty(pte);
732 		if (pte_young(tmp_pte))
733 			pte = pte_mkyoung(pte);
734 	}
735 	return pte;
736 }
737 #endif
738 
739 /**
740  * get_and_clear_ptes - Clear present PTEs that map consecutive pages of
741  *			the same folio, collecting dirty/accessed bits.
742  * @mm: Address space the pages are mapped into.
743  * @addr: Address the first page is mapped at.
744  * @ptep: Page table pointer for the first entry.
745  * @nr: Number of entries to clear.
746  *
747  * Use this instead of get_and_clear_full_ptes() if it is known that we don't
748  * need to clear the full mm, which is mostly the case.
749  *
750  * Note that PTE bits in the PTE range besides the PFN can differ. For example,
751  * some PTEs might be write-protected.
752  *
753  * Context: The caller holds the page table lock.  The PTEs map consecutive
754  * pages that belong to the same folio.  The PTEs are all in the same PMD.
755  */
756 static inline pte_t get_and_clear_ptes(struct mm_struct *mm, unsigned long addr,
757 		pte_t *ptep, unsigned int nr)
758 {
759 	return get_and_clear_full_ptes(mm, addr, ptep, nr, 0);
760 }
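/*
 * Illustrative unmap path (a sketch) for a PTE-mapped folio batch: the
 * returned pte carries the accumulated dirty/accessed state of the range.
 *
 *	pte_t pteval = get_and_clear_ptes(mm, addr, ptep, nr);
 *
 *	if (pte_dirty(pteval))
 *		folio_mark_dirty(folio);	// at least one entry was dirty
 */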
761 
762 #ifndef clear_full_ptes
763 /**
764  * clear_full_ptes - Clear present PTEs that map consecutive pages of the same
765  *		     folio.
766  * @mm: Address space the pages are mapped into.
767  * @addr: Address the first page is mapped at.
768  * @ptep: Page table pointer for the first entry.
769  * @nr: Number of entries to clear.
770  * @full: Whether we are clearing a full mm.
771  *
772  * May be overridden by the architecture; otherwise, implemented as a simple
773  * loop over ptep_get_and_clear_full().
774  *
775  * Note that PTE bits in the PTE range besides the PFN can differ. For example,
776  * some PTEs might be write-protected.
777  *
778  * Context: The caller holds the page table lock.  The PTEs map consecutive
779  * pages that belong to the same folio.  The PTEs are all in the same PMD.
780  */
781 static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
782 		pte_t *ptep, unsigned int nr, int full)
783 {
784 	for (;;) {
785 		ptep_get_and_clear_full(mm, addr, ptep, full);
786 		if (--nr == 0)
787 			break;
788 		ptep++;
789 		addr += PAGE_SIZE;
790 	}
791 }
792 #endif
793 
794 /**
795  * clear_ptes - Clear present PTEs that map consecutive pages of the same folio.
796  * @mm: Address space the pages are mapped into.
797  * @addr: Address the first page is mapped at.
798  * @ptep: Page table pointer for the first entry.
799  * @nr: Number of entries to clear.
800  *
801  * Use this instead of clear_full_ptes() if it is known that we don't need to
802  * clear the full mm, which is mostly the case.
803  *
804  * Note that PTE bits in the PTE range besides the PFN can differ. For example,
805  * some PTEs might be write-protected.
806  *
807  * Context: The caller holds the page table lock.  The PTEs map consecutive
808  * pages that belong to the same folio.  The PTEs are all in the same PMD.
809  */
810 static inline void clear_ptes(struct mm_struct *mm, unsigned long addr,
811 		pte_t *ptep, unsigned int nr)
812 {
813 	clear_full_ptes(mm, addr, ptep, nr, 0);
814 }
815 
816 /*
817  * If two threads concurrently fault at the same page, the thread that
818  * won the race updates the PTE and its local TLB/cache. The other thread
819  * gives up, simply does nothing, and continues; on architectures where
820  * software can update the TLB, the local TLB can be updated here to avoid
821  * the next page fault. This function updates the TLB only; unlike
822  * update_mmu_cache(), it does nothing with the cache or anything else.
823  */
824 #ifndef update_mmu_tlb_range
825 static inline void update_mmu_tlb_range(struct vm_area_struct *vma,
826 				unsigned long address, pte_t *ptep, unsigned int nr)
827 {
828 }
829 #endif
830 
831 static inline void update_mmu_tlb(struct vm_area_struct *vma,
832 				unsigned long address, pte_t *ptep)
833 {
834 	update_mmu_tlb_range(vma, address, ptep, 1);
835 }
836 
837 /*
838  * Some architectures may be able to avoid expensive synchronization
839  * primitives when modifications are made to PTEs which are already
840  * not present, or during address space destruction.
841  */
842 #ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
843 static inline void pte_clear_not_present_full(struct mm_struct *mm,
844 					      unsigned long address,
845 					      pte_t *ptep,
846 					      int full)
847 {
848 	pte_clear(mm, address, ptep);
849 }
850 #endif
851 
852 #ifndef clear_not_present_full_ptes
853 /**
854  * clear_not_present_full_ptes - Clear multiple non-present PTEs which are
855  *				 consecutive in the pgtable.
856  * @mm: Address space the ptes represent.
857  * @addr: Address of the first pte.
858  * @ptep: Page table pointer for the first entry.
859  * @nr: Number of entries to clear.
860  * @full: Whether we are clearing a full mm.
861  *
862  * May be overridden by the architecture; otherwise, implemented as a simple
863  * loop over pte_clear_not_present_full().
864  *
865  * Context: The caller holds the page table lock.  The PTEs are all not present.
866  * The PTEs are all in the same PMD.
867  */
868 static inline void clear_not_present_full_ptes(struct mm_struct *mm,
869 		unsigned long addr, pte_t *ptep, unsigned int nr, int full)
870 {
871 	for (;;) {
872 		pte_clear_not_present_full(mm, addr, ptep, full);
873 		if (--nr == 0)
874 			break;
875 		ptep++;
876 		addr += PAGE_SIZE;
877 	}
878 }
879 #endif
880 
881 #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
882 extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
883 			      unsigned long address,
884 			      pte_t *ptep);
885 #endif
886 
887 #ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
888 extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
889 			      unsigned long address,
890 			      pmd_t *pmdp);
891 extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
892 			      unsigned long address,
893 			      pud_t *pudp);
894 #endif
895 
896 #ifndef pte_mkwrite
897 static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
898 {
899 	return pte_mkwrite_novma(pte);
900 }
901 #endif
902 
903 #if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite)
904 static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
905 {
906 	return pmd_mkwrite_novma(pmd);
907 }
908 #endif
909 
910 #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
911 struct mm_struct;
912 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
913 {
914 	pte_t old_pte = ptep_get(ptep);
915 	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
916 }
917 #endif
918 
919 #ifndef wrprotect_ptes
920 /**
921  * wrprotect_ptes - Write-protect PTEs that map consecutive pages of the same
922  *		    folio.
923  * @mm: Address space the pages are mapped into.
924  * @addr: Address the first page is mapped at.
925  * @ptep: Page table pointer for the first entry.
926  * @nr: Number of entries to write-protect.
927  *
928  * May be overridden by the architecture; otherwise, implemented as a simple
929  * loop over ptep_set_wrprotect().
930  *
931  * Note that PTE bits in the PTE range besides the PFN can differ. For example,
932  * some PTEs might be write-protected.
933  *
934  * Context: The caller holds the page table lock.  The PTEs map consecutive
935  * pages that belong to the same folio.  The PTEs are all in the same PMD.
936  */
937 static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
938 		pte_t *ptep, unsigned int nr)
939 {
940 	for (;;) {
941 		ptep_set_wrprotect(mm, addr, ptep);
942 		if (--nr == 0)
943 			break;
944 		ptep++;
945 		addr += PAGE_SIZE;
946 	}
947 }
948 #endif
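/*
 * Illustrative fork-style copy path (a sketch): write-protect the parent's
 * batch so that both processes fault and copy-on-write on the next write.
 *
 *	wrprotect_ptes(src_mm, addr, src_ptep, nr);
 *	(then copy the now read-only entries into the child's page table)
 */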
949 
950 /*
951  * On some architectures the hardware does not set the page access bit when a
952  * memory page is accessed; it is the responsibility of software to set this
953  * bit, which incurs an extra page fault penalty to track page accesses. As an
954  * optimization, the access bit can be set during the page fault flow on these
955  * arches. To differentiate it from the pte_mkyoung macro, this macro is used
956  * on platforms where software maintains the page access bit.
957  */
958 #ifndef pte_sw_mkyoung
959 static inline pte_t pte_sw_mkyoung(pte_t pte)
960 {
961 	return pte;
962 }
963 #define pte_sw_mkyoung	pte_sw_mkyoung
964 #endif
965 
966 #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
967 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
968 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
969 				      unsigned long address, pmd_t *pmdp)
970 {
971 	pmd_t old_pmd = *pmdp;
972 	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
973 }
974 #else
975 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
976 				      unsigned long address, pmd_t *pmdp)
977 {
978 	BUILD_BUG();
979 }
980 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
981 #endif
982 #ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
983 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
984 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
985 static inline void pudp_set_wrprotect(struct mm_struct *mm,
986 				      unsigned long address, pud_t *pudp)
987 {
988 	pud_t old_pud = *pudp;
989 
990 	set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
991 }
992 #else
993 static inline void pudp_set_wrprotect(struct mm_struct *mm,
994 				      unsigned long address, pud_t *pudp)
995 {
996 	BUILD_BUG();
997 }
998 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
999 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1000 #endif
1001 
1002 #ifndef pmdp_collapse_flush
1003 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1004 extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
1005 				 unsigned long address, pmd_t *pmdp);
1006 #else
1007 static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
1008 					unsigned long address,
1009 					pmd_t *pmdp)
1010 {
1011 	BUILD_BUG();
1012 	return *pmdp;
1013 }
1014 #define pmdp_collapse_flush pmdp_collapse_flush
1015 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1016 #endif
1017 
1018 #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
1019 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1020 				       pgtable_t pgtable);
1021 #endif
1022 
1023 #ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
1024 extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
1025 #endif
1026 
1027 #ifndef arch_needs_pgtable_deposit
1028 #define arch_needs_pgtable_deposit() (false)
1029 #endif
1030 
1031 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1032 /*
1033  * This is an implementation of pmdp_establish() that is only suitable for an
1034  * architecture that doesn't have hardware dirty/accessed bits. In this case we
1035  * can't race with the CPU setting these bits, so a non-atomic approach is fine.
1036  */
1037 static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
1038 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
1039 {
1040 	pmd_t old_pmd = *pmdp;
1041 	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
1042 	return old_pmd;
1043 }
1044 #endif
1045 
1046 #ifndef __HAVE_ARCH_PMDP_INVALIDATE
1047 extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
1048 			    pmd_t *pmdp);
1049 #endif
1050 
1051 #ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
1052 
1053 /*
1054  * pmdp_invalidate_ad() invalidates the PMD while changing a transparent
1055  * hugepage mapping in the page tables. This function is similar to
1056  * pmdp_invalidate(), but should only be used if the access and dirty bits would
1057  * not be cleared by the software in the new PMD value. The function ensures
1058  * that hardware updates of the access and dirty bits are not lost.
1059  *
1060  * Doing so allows certain architectures to avoid a TLB flush in most cases.
1061  * Another TLB flush might still be necessary later if the PMD update itself
1062  * requires one (e.g., if the protection was made stricter). Even when a TLB
1063  * flush is needed because of the update, the caller may be able to batch
1064  * these TLB flushing operations, so fewer TLB flush operations are needed.
1066  */
1067 extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
1068 				unsigned long address, pmd_t *pmdp);
1069 #endif
1070 
1071 #ifndef __HAVE_ARCH_PTE_SAME
1072 static inline int pte_same(pte_t pte_a, pte_t pte_b)
1073 {
1074 	return pte_val(pte_a) == pte_val(pte_b);
1075 }
1076 #endif
1077 
1078 #ifndef __HAVE_ARCH_PTE_UNUSED
1079 /*
1080  * Some architectures provide facilities to virtualization guests
1081  * so that they can flag allocated pages as unused. This allows the
1082  * host to transparently reclaim unused pages. This function returns
1083  * whether the pte's page is unused.
1084  */
1085 static inline int pte_unused(pte_t pte)
1086 {
1087 	return 0;
1088 }
1089 #endif
1090 
1091 #ifndef pte_access_permitted
1092 #define pte_access_permitted(pte, write) \
1093 	(pte_present(pte) && (!(write) || pte_write(pte)))
1094 #endif
1095 
1096 #ifndef pmd_access_permitted
1097 #define pmd_access_permitted(pmd, write) \
1098 	(pmd_present(pmd) && (!(write) || pmd_write(pmd)))
1099 #endif
1100 
1101 #ifndef pud_access_permitted
1102 #define pud_access_permitted(pud, write) \
1103 	(pud_present(pud) && (!(write) || pud_write(pud)))
1104 #endif
1105 
1106 #ifndef p4d_access_permitted
1107 #define p4d_access_permitted(p4d, write) \
1108 	(p4d_present(p4d) && (!(write) || p4d_write(p4d)))
1109 #endif
1110 
1111 #ifndef pgd_access_permitted
1112 #define pgd_access_permitted(pgd, write) \
1113 	(pgd_present(pgd) && (!(write) || pgd_write(pgd)))
1114 #endif
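/*
 * Illustrative GUP-style check (a sketch) before exposing a mapping to a
 * caller requesting write access:
 *
 *	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
 *		return NULL;	// insufficient access for this walk
 */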
1115 
1116 #ifndef __HAVE_ARCH_PMD_SAME
1117 static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
1118 {
1119 	return pmd_val(pmd_a) == pmd_val(pmd_b);
1120 }
1121 #endif
1122 
1123 #ifndef pud_same
1124 static inline int pud_same(pud_t pud_a, pud_t pud_b)
1125 {
1126 	return pud_val(pud_a) == pud_val(pud_b);
1127 }
1128 #define pud_same pud_same
1129 #endif
1130 
1131 #ifndef __HAVE_ARCH_P4D_SAME
1132 static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
1133 {
1134 	return p4d_val(p4d_a) == p4d_val(p4d_b);
1135 }
1136 #endif
1137 
1138 #ifndef __HAVE_ARCH_PGD_SAME
1139 static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
1140 {
1141 	return pgd_val(pgd_a) == pgd_val(pgd_b);
1142 }
1143 #endif
1144 
1145 #ifndef __HAVE_ARCH_DO_SWAP_PAGE
1146 static inline void arch_do_swap_page_nr(struct mm_struct *mm,
1147 				     struct vm_area_struct *vma,
1148 				     unsigned long addr,
1149 				     pte_t pte, pte_t oldpte,
1150 				     int nr)
1151 {
1152 
1153 }
1154 #else
1155 /*
1156  * Some architectures support metadata associated with a page. When a
1157  * page is being swapped out, this metadata must be saved so it can be
1158  * restored when the page is swapped back in. SPARC M7 and newer
1159  * processors support an ADI (Application Data Integrity) tag for the
1160  * page as metadata for the page. arch_do_swap_page() can restore this
1161  * metadata when a page is swapped back in.
1162  */
1163 static inline void arch_do_swap_page_nr(struct mm_struct *mm,
1164 					struct vm_area_struct *vma,
1165 					unsigned long addr,
1166 					pte_t pte, pte_t oldpte,
1167 					int nr)
1168 {
1169 	for (int i = 0; i < nr; i++) {
1170 		arch_do_swap_page(vma->vm_mm, vma, addr + i * PAGE_SIZE,
1171 				pte_advance_pfn(pte, i),
1172 				pte_advance_pfn(oldpte, i));
1173 	}
1174 }
1175 #endif
1176 
1177 #ifndef __HAVE_ARCH_UNMAP_ONE
1178 /*
1179  * Some architectures support metadata associated with a page. When a
1180  * page is being swapped out, this metadata must be saved so it can be
1181  * restored when the page is swapped back in. SPARC M7 and newer
1182  * processors support an ADI (Application Data Integrity) tag for the
1183  * page as metadata for the page. arch_unmap_one() can save this
1184  * metadata on a swap-out of a page.
1185  */
1186 static inline int arch_unmap_one(struct mm_struct *mm,
1187 				  struct vm_area_struct *vma,
1188 				  unsigned long addr,
1189 				  pte_t orig_pte)
1190 {
1191 	return 0;
1192 }
1193 #endif
1194 
1195 /*
1196  * Allow architectures to preserve additional metadata associated with
1197  * swapped-out pages. The corresponding __HAVE_ARCH_SWAP_* macros and function
1198  * prototypes must be defined in the arch-specific asm/pgtable.h file.
1199  */
1200 #ifndef __HAVE_ARCH_PREPARE_TO_SWAP
1201 static inline int arch_prepare_to_swap(struct folio *folio)
1202 {
1203 	return 0;
1204 }
1205 #endif
1206 
1207 #ifndef __HAVE_ARCH_SWAP_INVALIDATE
1208 static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
1209 {
1210 }
1211 
1212 static inline void arch_swap_invalidate_area(int type)
1213 {
1214 }
1215 #endif
1216 
1217 #ifndef __HAVE_ARCH_SWAP_RESTORE
1218 static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
1219 {
1220 }
1221 #endif
1222 
1223 #ifndef __HAVE_ARCH_MOVE_PTE
1224 #define move_pte(pte, old_addr, new_addr)	(pte)
1225 #endif
1226 
1227 #ifndef pte_accessible
1228 # define pte_accessible(mm, pte)	((void)(pte), 1)
1229 #endif
1230 
1231 #ifndef flush_tlb_fix_spurious_fault
1232 #define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
1233 #endif
1234 
1235 /*
1236  * When walking page tables, get the address of the next boundary,
1237  * or the end address of the range if that comes earlier.  Although no
1238  * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
1239  */
1240 
1241 #define pgd_addr_end(addr, end)						\
1242 ({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
1243 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
1244 })
1245 
1246 #ifndef p4d_addr_end
1247 #define p4d_addr_end(addr, end)						\
1248 ({	unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK;	\
1249 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
1250 })
1251 #endif
1252 
1253 #ifndef pud_addr_end
1254 #define pud_addr_end(addr, end)						\
1255 ({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
1256 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
1257 })
1258 #endif
1259 
1260 #ifndef pmd_addr_end
1261 #define pmd_addr_end(addr, end)						\
1262 ({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
1263 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
1264 })
1265 #endif
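/*
 * Worked example of the wrap-safe comparison above (illustrative): with
 * 64-bit longs and PGDIR_SIZE == 1UL << 39, addr == 0xffffff8000000000
 * rounds up to __boundary == 0; the unsigned "- 1" comparison then treats
 * __boundary as past any non-zero end, so end is correctly returned.
 */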
1266 
1267 /*
1268  * When walking page tables, we usually want to skip any p?d_none entries;
1269  * and any p?d_bad entries - reporting the error before resetting to none.
1270  * Do the tests inline, but report and clear the bad entry in mm/memory.c.
1271  */
1272 void pgd_clear_bad(pgd_t *);
1273 
1274 #ifndef __PAGETABLE_P4D_FOLDED
1275 void p4d_clear_bad(p4d_t *);
1276 #else
1277 #define p4d_clear_bad(p4d)        do { } while (0)
1278 #endif
1279 
1280 #ifndef __PAGETABLE_PUD_FOLDED
1281 void pud_clear_bad(pud_t *);
1282 #else
1283 #define pud_clear_bad(pud)        do { } while (0)
1284 #endif
1285 
1286 void pmd_clear_bad(pmd_t *);
1287 
1288 static inline int pgd_none_or_clear_bad(pgd_t *pgd)
1289 {
1290 	if (pgd_none(*pgd))
1291 		return 1;
1292 	if (unlikely(pgd_bad(*pgd))) {
1293 		pgd_clear_bad(pgd);
1294 		return 1;
1295 	}
1296 	return 0;
1297 }
1298 
1299 static inline int p4d_none_or_clear_bad(p4d_t *p4d)
1300 {
1301 	if (p4d_none(*p4d))
1302 		return 1;
1303 	if (unlikely(p4d_bad(*p4d))) {
1304 		p4d_clear_bad(p4d);
1305 		return 1;
1306 	}
1307 	return 0;
1308 }
1309 
1310 static inline int pud_none_or_clear_bad(pud_t *pud)
1311 {
1312 	if (pud_none(*pud))
1313 		return 1;
1314 	if (unlikely(pud_bad(*pud))) {
1315 		pud_clear_bad(pud);
1316 		return 1;
1317 	}
1318 	return 0;
1319 }
1320 
1321 static inline int pmd_none_or_clear_bad(pmd_t *pmd)
1322 {
1323 	if (pmd_none(*pmd))
1324 		return 1;
1325 	if (unlikely(pmd_bad(*pmd))) {
1326 		pmd_clear_bad(pmd);
1327 		return 1;
1328 	}
1329 	return 0;
1330 }
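/*
 * These helpers combine into the canonical walk skeleton (illustrative
 * sketch; the descend step is elided):
 *
 *	pgd = pgd_offset(mm, addr);
 *	do {
 *		next = pgd_addr_end(addr, end);
 *		if (pgd_none_or_clear_bad(pgd))
 *			continue;
 *		... descend via p4d_offset()/p4d_addr_end() and so on ...
 *	} while (pgd++, addr = next, addr != end);
 */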
1331 
1332 static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
1333 					     unsigned long addr,
1334 					     pte_t *ptep)
1335 {
1336 	/*
1337 	 * Get the current pte state, but zero it out to make it
1338 	 * non-present, preventing the hardware from asynchronously
1339 	 * updating it.
1340 	 */
1341 	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
1342 }
1343 
1344 static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
1345 					     unsigned long addr,
1346 					     pte_t *ptep, pte_t pte)
1347 {
1348 	/*
1349 	 * The pte is non-present, so there's no hardware state to
1350 	 * preserve.
1351 	 */
1352 	set_pte_at(vma->vm_mm, addr, ptep, pte);
1353 }
1354 
1355 #ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1356 /*
1357  * Start a pte protection read-modify-write transaction, which
1358  * protects against asynchronous hardware modifications to the pte.
1359  * The intention is not to prevent the hardware from making pte
1360  * updates, but to prevent any updates it may make from being lost.
1361  *
1362  * This does not protect against other software modifications of the
1363  * pte; the appropriate pte lock must be held over the transaction.
1364  *
1365  * Note that this interface is intended to be batchable, meaning that
1366  * ptep_modify_prot_commit may not actually update the pte, but merely
1367  * queue the update to be done at some later time.  The update must be
1368  * actually committed before the pte lock is released, however.
1369  */
1370 static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
1371 					   unsigned long addr,
1372 					   pte_t *ptep)
1373 {
1374 	return __ptep_modify_prot_start(vma, addr, ptep);
1375 }
1376 
1377 /*
1378  * Commit an update to a pte, leaving any hardware-controlled bits in
1379  * the PTE unmodified. The pte returned from ptep_modify_prot_start() may
1380  * additionally have young and/or dirty bits set where previously they were not,
1381  * so the updated pte may have these additional changes.
1382  */
1383 static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
1384 					   unsigned long addr,
1385 					   pte_t *ptep, pte_t old_pte, pte_t pte)
1386 {
1387 	__ptep_modify_prot_commit(vma, addr, ptep, pte);
1388 }
1389 #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
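/*
 * Illustrative protection-change transaction (a sketch of mprotect-style
 * usage; the page table lock is assumed held):
 *
 *	oldpte = ptep_modify_prot_start(vma, addr, ptep);
 *	ptent = pte_modify(oldpte, newprot);
 *	ptep_modify_prot_commit(vma, addr, ptep, oldpte, ptent);
 *
 * Hardware dirty/young updates made before the start() are carried in
 * oldpte and thus preserved across the commit.
 */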
1390 
1391 /**
1392  * modify_prot_start_ptes - Start a pte protection read-modify-write transaction
1393  * over a batch of ptes, which protects against asynchronous hardware
1394  * modifications to the ptes. The intention is not to prevent the hardware from
1395  * making pte updates, but to prevent any updates it may make from being lost.
1396  * Please see the comment above ptep_modify_prot_start() for full description.
1397  *
1398  * @vma: The virtual memory area the pages are mapped into.
1399  * @addr: Address the first page is mapped at.
1400  * @ptep: Page table pointer for the first entry.
1401  * @nr: Number of entries.
1402  *
1403  * May be overridden by the architecture; otherwise, implemented as a simple
1404  * loop over ptep_modify_prot_start(), collecting the a/d bits from each pte
1405  * in the batch.
1406  *
1407  * Note that PTE bits in the PTE batch besides the PFN can differ.
1408  *
1409  * Context: The caller holds the page table lock.  The PTEs map consecutive
1410  * pages that belong to the same folio. All other PTE bits must be identical for
1411  * all PTEs in the batch except for young and dirty bits.  The PTEs are all in
1412  * the same PMD.
1413  */
1414 #ifndef modify_prot_start_ptes
1415 static inline pte_t modify_prot_start_ptes(struct vm_area_struct *vma,
1416 		unsigned long addr, pte_t *ptep, unsigned int nr)
1417 {
1418 	pte_t pte, tmp_pte;
1419 
1420 	pte = ptep_modify_prot_start(vma, addr, ptep);
1421 	while (--nr) {
1422 		ptep++;
1423 		addr += PAGE_SIZE;
1424 		tmp_pte = ptep_modify_prot_start(vma, addr, ptep);
1425 		if (pte_dirty(tmp_pte))
1426 			pte = pte_mkdirty(pte);
1427 		if (pte_young(tmp_pte))
1428 			pte = pte_mkyoung(pte);
1429 	}
1430 	return pte;
1431 }
1432 #endif
1433 
1434 /**
1435  * modify_prot_commit_ptes - Commit an update to a batch of ptes, leaving any
1436  * hardware-controlled bits in the PTE unmodified.
1437  *
1438  * @vma: The virtual memory area the pages are mapped into.
1439  * @addr: Address the first page is mapped at.
1440  * @ptep: Page table pointer for the first entry.
1441  * @old_pte: Old page table entry (for the first entry) which is now cleared.
1442  * @pte: New page table entry to be set.
1443  * @nr: Number of entries.
1444  *
1445  * May be overridden by the architecture; otherwise, implemented as a simple
1446  * loop over ptep_modify_prot_commit().
1447  *
1448  * Context: The caller holds the page table lock. The PTEs are all in the same
1449  * PMD. On exit, the set ptes in the batch map the same folio. The ptes set by
1450  * ptep_modify_prot_start() may additionally have young and/or dirty bits set
1451  * where previously they were not, so the updated ptes may have these
1452  * additional changes.
1453  */
1454 #ifndef modify_prot_commit_ptes
1455 static inline void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
1456 		pte_t *ptep, pte_t old_pte, pte_t pte, unsigned int nr)
1457 {
1458 	int i;
1459 
1460 	for (i = 0; i < nr; ++i, ++ptep, addr += PAGE_SIZE) {
1461 		ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
1462 
1463 		/* Advance PFN only, set same prot */
1464 		old_pte = pte_next_pfn(old_pte);
1465 		pte = pte_next_pfn(pte);
1466 	}
1467 }
1468 #endif
1469 
1470 #endif /* CONFIG_MMU */
1471 
1472 /*
1473  * No-op macros that just return the current protection value. Defined here
1474  * because these macros can be used even if CONFIG_MMU is not defined.
1475  */
1476 
1477 #ifndef pgprot_nx
1478 #define pgprot_nx(prot)	(prot)
1479 #endif
1480 
1481 #ifndef pgprot_noncached
1482 #define pgprot_noncached(prot)	(prot)
1483 #endif
1484 
1485 #ifndef pgprot_writecombine
1486 #define pgprot_writecombine pgprot_noncached
1487 #endif
1488 
1489 #ifndef pgprot_writethrough
1490 #define pgprot_writethrough pgprot_noncached
1491 #endif
1492 
1493 #ifndef pgprot_device
1494 #define pgprot_device pgprot_noncached
1495 #endif
1496 
1497 #ifndef pgprot_mhp
1498 #define pgprot_mhp(prot)	(prot)
1499 #endif
1500 
1501 #ifdef CONFIG_MMU
1502 #ifndef pgprot_modify
1503 #define pgprot_modify pgprot_modify
1504 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
1505 {
1506 	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
1507 		newprot = pgprot_noncached(newprot);
1508 	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
1509 		newprot = pgprot_writecombine(newprot);
1510 	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
1511 		newprot = pgprot_device(newprot);
1512 	return newprot;
1513 }
1514 #endif
1515 #endif /* CONFIG_MMU */
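/*
 * Illustrative use (a sketch): when recomputing protections for a mapping,
 * carry the existing cache attributes over to the new protection value.
 *
 *	newprot = pgprot_modify(vma->vm_page_prot, vm_get_page_prot(newflags));
 */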
1516 
1517 #ifndef pgprot_encrypted
1518 #define pgprot_encrypted(prot)	(prot)
1519 #endif
1520 
1521 #ifndef pgprot_decrypted
1522 #define pgprot_decrypted(prot)	(prot)
1523 #endif
1524 
1525 /*
1526  * A facility to provide batching of the reload of page tables and
1527  * other process state with the actual context switch code for
1528  * paravirtualized guests.  By convention, only one of the batched
1529  * update (lazy) modes (CPU, MMU) should be active at any given time,
1530  * entry should never be nested, and entry and exits should always be
1531  * paired.  This is for sanity of maintaining and reasoning about the
1532  * kernel code.  In this case, the exit (end of the context switch) is
1533  * in architecture-specific code, and so doesn't need a generic
1534  * definition.
1535  */
1536 #ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
1537 #define arch_start_context_switch(prev)	do {} while (0)
1538 #endif
1539 
1540 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
1541 #ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
1542 static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
1543 {
1544 	return pmd;
1545 }
1546 
1547 static inline int pmd_swp_soft_dirty(pmd_t pmd)
1548 {
1549 	return 0;
1550 }
1551 
1552 static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
1553 {
1554 	return pmd;
1555 }
1556 #endif
1557 #else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
1558 static inline int pte_soft_dirty(pte_t pte)
1559 {
1560 	return 0;
1561 }
1562 
1563 static inline int pmd_soft_dirty(pmd_t pmd)
1564 {
1565 	return 0;
1566 }
1567 
1568 static inline pte_t pte_mksoft_dirty(pte_t pte)
1569 {
1570 	return pte;
1571 }
1572 
1573 static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
1574 {
1575 	return pmd;
1576 }
1577 
1578 static inline pte_t pte_clear_soft_dirty(pte_t pte)
1579 {
1580 	return pte;
1581 }
1582 
1583 static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
1584 {
1585 	return pmd;
1586 }
1587 
1588 static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
1589 {
1590 	return pte;
1591 }
1592 
1593 static inline int pte_swp_soft_dirty(pte_t pte)
1594 {
1595 	return 0;
1596 }
1597 
1598 static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
1599 {
1600 	return pte;
1601 }
1602 
1603 static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
1604 {
1605 	return pmd;
1606 }
1607 
1608 static inline int pmd_swp_soft_dirty(pmd_t pmd)
1609 {
1610 	return 0;
1611 }
1612 
1613 static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
1614 {
1615 	return pmd;
1616 }
1617 #endif
1618 
1619 #ifndef __HAVE_PFNMAP_TRACKING
1620 /*
1621  * Interfaces that can be used by architecture code to keep track of
1622  * the memory type of pfn mappings specified by remap_pfn_range() and
1623  * vmf_insert_pfn().
1624  */
1625 
1626 static inline int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
1627 		pgprot_t *prot)
1628 {
1629 	return 0;
1630 }
1631 
1632 static inline int pfnmap_track(unsigned long pfn, unsigned long size,
1633 		pgprot_t *prot)
1634 {
1635 	return 0;
1636 }
1637 
1638 static inline void pfnmap_untrack(unsigned long pfn, unsigned long size)
1639 {
1640 }
1641 #else
1642 /**
1643  * pfnmap_setup_cachemode - setup the cachemode in the pgprot for a pfn range
1644  * @pfn: the start of the pfn range
1645  * @size: the size of the pfn range in bytes
1646  * @prot: the pgprot to modify
1647  *
1648  * Lookup the cachemode for the pfn range starting at @pfn with the size
1649  * @size and store it in @prot, leaving other data in @prot unchanged.
1650  *
1651  * This allows for a hardware implementation to have fine-grained control of
1652  * memory cache behavior at page level granularity. Without a hardware
1653  * implementation, this function does nothing.
1654  *
1655  * Currently there is only one implementation for this - x86 Page Attribute
1656  * Table (PAT). See Documentation/arch/x86/pat.rst for more details.
1657  *
1658  * This function can fail if the pfn range spans pfns that require differing
1659  * cachemodes. If the pfn range was previously verified to have a single
1660  * cachemode, it is sufficient to query only a single pfn. The assumption is
1661  * that this is the case for drivers using the vmf_insert_pfn*() interface.
1662  *
1663  * Returns 0 on success and -EINVAL on error.
1664  */
1665 int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
1666 		pgprot_t *prot);
1667 
1668 /**
1669  * pfnmap_track - track a pfn range
1670  * @pfn: the start of the pfn range
1671  * @size: the size of the pfn range in bytes
1672  * @prot: the pgprot to track
1673  *
1674  * Request the pfn range to be 'tracked' by a hardware implementation and
1675  * set up the cachemode in @prot similar to pfnmap_setup_cachemode().
1676  *
1677  * This allows for fine-grained control of memory cache behavior at page
1678  * level granularity. Tracking memory this way is persisted across VMA splits
1679  * (VMA merging does not apply for VM_PFNMAP).
1680  *
1681  * Currently, there is only one implementation for this - x86 Page Attribute
1682  * Table (PAT). See Documentation/arch/x86/pat.rst for more details.
1683  *
1684  * Returns 0 on success and -EINVAL on error.
1685  */
1686 int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot);
1687 
1688 /**
1689  * pfnmap_untrack - untrack a pfn range
1690  * @pfn: the start of the pfn range
1691  * @size: the size of the pfn range in bytes
1692  *
1693  * Untrack a pfn range previously tracked through pfnmap_track().
1694  */
1695 void pfnmap_untrack(unsigned long pfn, unsigned long size);
1696 #endif
1697 
1698 /**
1699  * pfnmap_setup_cachemode_pfn - setup the cachemode in the pgprot for a pfn
1700  * @pfn: the pfn
1701  * @prot: the pgprot to modify
1702  *
1703  * Lookup the cachemode for @pfn and store it in @prot, leaving other
1704  * data in @prot unchanged.
1705  *
1706  * See pfnmap_setup_cachemode() for details.
1707  */
1708 static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
1709 {
1710 	pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot);
1711 }
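/*
 * Illustrative driver-side sketch before inserting a single-pfn mapping
 * (the vmf_insert_pfn_prot() usage here is an assumption of this example):
 *
 *	pgprot_t prot = vma->vm_page_prot;
 *
 *	pfnmap_setup_cachemode_pfn(pfn, &prot);
 *	return vmf_insert_pfn_prot(vma, vmf->address, pfn, prot);
 */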
1712 
1713 #ifdef CONFIG_MMU
1714 #ifdef __HAVE_COLOR_ZERO_PAGE
1715 static inline int is_zero_pfn(unsigned long pfn)
1716 {
1717 	extern unsigned long zero_pfn;
1718 	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
1719 	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
1720 }
1721 
1722 #define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
1723 
1724 #else
1725 static inline int is_zero_pfn(unsigned long pfn)
1726 {
1727 	extern unsigned long zero_pfn;
1728 	return pfn == zero_pfn;
1729 }
1730 
1731 static inline unsigned long my_zero_pfn(unsigned long addr)
1732 {
1733 	extern unsigned long zero_pfn;
1734 	return zero_pfn;
1735 }
1736 #endif
1737 #else
1738 static inline int is_zero_pfn(unsigned long pfn)
1739 {
1740 	return 0;
1741 }
1742 
1743 static inline unsigned long my_zero_pfn(unsigned long addr)
1744 {
1745 	return 0;
1746 }
1747 #endif /* CONFIG_MMU */
1748 
1749 #ifdef CONFIG_MMU
1750 
1751 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
1752 static inline int pmd_trans_huge(pmd_t pmd)
1753 {
1754 	return 0;
1755 }
1756 #ifndef pmd_write
1757 static inline int pmd_write(pmd_t pmd)
1758 {
1759 	BUG();
1760 	return 0;
1761 }
1762 #endif /* pmd_write */
1763 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1764 
1765 #ifndef pud_write
1766 static inline int pud_write(pud_t pud)
1767 {
1768 	BUG();
1769 	return 0;
1770 }
1771 #endif /* pud_write */
1772 
1773 #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
1774 	!defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1775 static inline int pud_trans_huge(pud_t pud)
1776 {
1777 	return 0;
1778 }
1779 #endif
1780 
1781 static inline int pud_trans_unstable(pud_t *pud)
1782 {
1783 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
1784 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1785 	pud_t pudval = READ_ONCE(*pud);
1786 
1787 	if (pud_none(pudval) || pud_trans_huge(pudval))
1788 		return 1;
1789 	if (unlikely(pud_bad(pudval))) {
1790 		pud_clear_bad(pud);
1791 		return 1;
1792 	}
1793 #endif
1794 	return 0;
1795 }
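/*
 * Illustrative sketch (not part of the original header): a walker that
 * wants to descend to the PMD level can use pud_trans_unstable() to back
 * off when the PUD is none, a huge entry, or in the middle of a split.
 * The walker helpers and the retry policy are hypothetical.
 */
#if 0	/* example only */
static int example_walk_pud(pud_t *pud, unsigned long addr)
{
	if (pud_trans_unstable(pud))
		return -EAGAIN;	/* entry may change underneath us; retry */
	/* The PUD points to a stable PMD table; safe to walk down. */
	return example_walk_pmd(pmd_offset(pud, addr), addr);
}
#endif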
1796 
1797 #ifndef CONFIG_NUMA_BALANCING
1798 /*
1799  * In an inaccessible (PROT_NONE) VMA, pte_protnone() may indicate "yes". It is
1800  * perfectly valid to indicate "no" in that case, which is why the default
1801  * implementation is "always no".
1802  *
1803  * In an accessible VMA, however, pte_protnone() reliably indicates PROT_NONE
1804  * page protection due to NUMA hinting. NUMA hinting faults only apply in
1805  * accessible VMAs.
1806  *
1807  * So, to reliably identify PROT_NONE PTEs that require a NUMA hinting fault,
1808  * looking at the VMA accessibility is sufficient.
1809  */
1810 static inline int pte_protnone(pte_t pte)
1811 {
1812 	return 0;
1813 }
1814 
1815 static inline int pmd_protnone(pmd_t pmd)
1816 {
1817 	return 0;
1818 }
1819 #endif /* CONFIG_NUMA_BALANCING */
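/*
 * Illustrative sketch (not part of the original header): per the comment
 * above, a PTE requiring a NUMA hinting fault is a protnone PTE in an
 * accessible VMA; cf. the checks done in the fault path.  The helper
 * name is hypothetical.
 */
#if 0	/* example only */
static inline bool example_is_numa_hint_pte(struct vm_area_struct *vma,
					    pte_t pte)
{
	return pte_protnone(pte) && vma_is_accessible(vma);
}
#endif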
1820 
1821 #endif /* CONFIG_MMU */
1822 
1823 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
1824 
1825 #ifndef __PAGETABLE_P4D_FOLDED
1826 int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
1827 void p4d_clear_huge(p4d_t *p4d);
1828 #else
1829 static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
1830 {
1831 	return 0;
1832 }
1833 static inline void p4d_clear_huge(p4d_t *p4d) { }
1834 #endif /* !__PAGETABLE_P4D_FOLDED */
1835 
1836 int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
1837 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
1838 int pud_clear_huge(pud_t *pud);
1839 int pmd_clear_huge(pmd_t *pmd);
1840 int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
1841 int pud_free_pmd_page(pud_t *pud, unsigned long addr);
1842 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
1843 #else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
1844 static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
1845 {
1846 	return 0;
1847 }
1848 static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
1849 {
1850 	return 0;
1851 }
1852 static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
1853 {
1854 	return 0;
1855 }
1856 static inline void p4d_clear_huge(p4d_t *p4d) { }
1857 static inline int pud_clear_huge(pud_t *pud)
1858 {
1859 	return 0;
1860 }
1861 static inline int pmd_clear_huge(pmd_t *pmd)
1862 {
1863 	return 0;
1864 }
1865 static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
1866 {
1867 	return 0;
1868 }
1869 static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1870 {
1871 	return 0;
1872 }
1873 static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1874 {
1875 	return 0;
1876 }
1877 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
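/*
 * Illustrative sketch (not part of the original header): an ioremap-style
 * mapper can attempt a huge PMD entry for suitably aligned ranges and
 * fall back to PTE mappings otherwise, roughly as the generic vmap code
 * does.  The fallback helper named below is hypothetical.
 */
#if 0	/* example only */
static int example_map_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot)
{
	if (IS_ALIGNED(addr, PMD_SIZE) && IS_ALIGNED(phys, PMD_SIZE) &&
	    (end - addr) >= PMD_SIZE && pmd_set_huge(pmd, phys, prot))
		return 0;	/* mapped with a single huge entry */

	return example_map_ptes(pmd, addr, end, phys, prot);
}
#endif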
1878 
1879 #ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
1880 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1881 /*
1882  * Architectures with special requirements for evicting THP-backing TLB
1883  * entries can implement this. Even otherwise, it can help optimize the
1884  * normal TLB flush under THP: the stock flush_tlb_range() typically
1885  * nukes the entire TLB once the flush span exceeds a threshold, which
1886  * will likely be true for a single huge page. A single THP flush would
1887  * thus invalidate the entire TLB, which is not desirable.
1888  * e.g. see arch/arc: flush_pmd_tlb_range
1889  */
1890 #define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
1891 #define flush_pud_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
1892 #else
1893 #define flush_pmd_tlb_range(vma, addr, end)	BUILD_BUG()
1894 #define flush_pud_tlb_range(vma, addr, end)	BUILD_BUG()
1895 #endif
1896 #endif
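/*
 * Illustrative sketch (not part of the original header): after changing
 * a huge PMD in place (e.g. a protection update), generic THP code
 * flushes just that PMD-sized span instead of risking a full-TLB nuke:
 *
 *	flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
 */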
1897 
1898 struct file;
1899 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
1900 			unsigned long size, pgprot_t *vma_prot);
1901 
1902 #ifndef CONFIG_X86_ESPFIX64
1903 static inline void init_espfix_bsp(void) { }
1904 #endif
1905 
1906 extern void __init pgtable_cache_init(void);
1907 
1908 #ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
1909 static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
1910 {
1911 	return true;
1912 }
1913 
1914 static inline bool arch_has_pfn_modify_check(void)
1915 {
1916 	return false;
1917 }
1918 #endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
1919 
1920 /*
1921  * Architecture PAGE_KERNEL_* fallbacks
1922  *
1923  * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
1924  * because they really don't support them, or the port needs to be updated to
1925  * reflect the required functionality. Below is a set of relatively safe,
1926  * best-effort fallbacks to rely on until the architectures define these
1927  * flags on their own.
1928  */
1929 
1930 #ifndef PAGE_KERNEL_RO
1931 # define PAGE_KERNEL_RO PAGE_KERNEL
1932 #endif
1933 
1934 #ifndef PAGE_KERNEL_EXEC
1935 # define PAGE_KERNEL_EXEC PAGE_KERNEL
1936 #endif
1937 
1938 /*
1939  * Page Table Modification bits for pgtbl_mod_mask.
1940  *
1941  * These are used by the p?d_alloc_track*() set of functions and in the generic
1942  * vmalloc/ioremap code to track at which page-table levels entries have been
1943  * modified. Based on that, the code can better decide when vmalloc and ioremap
1944  * mapping changes need to be synchronized to other page-tables in the system.
1945  */
1946 #define		__PGTBL_PGD_MODIFIED	0
1947 #define		__PGTBL_P4D_MODIFIED	1
1948 #define		__PGTBL_PUD_MODIFIED	2
1949 #define		__PGTBL_PMD_MODIFIED	3
1950 #define		__PGTBL_PTE_MODIFIED	4
1951 
1952 #define		PGTBL_PGD_MODIFIED	BIT(__PGTBL_PGD_MODIFIED)
1953 #define		PGTBL_P4D_MODIFIED	BIT(__PGTBL_P4D_MODIFIED)
1954 #define		PGTBL_PUD_MODIFIED	BIT(__PGTBL_PUD_MODIFIED)
1955 #define		PGTBL_PMD_MODIFIED	BIT(__PGTBL_PMD_MODIFIED)
1956 #define		PGTBL_PTE_MODIFIED	BIT(__PGTBL_PTE_MODIFIED)
1957 
1958 /* Page-Table Modification Mask */
1959 typedef unsigned int pgtbl_mod_mask;
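/*
 * Illustrative sketch (not part of the original header): a caller
 * accumulates the per-level bits while populating a range, then decides
 * whether other page tables must be synchronized.  The mapping helper is
 * hypothetical; ARCH_PAGE_TABLE_SYNC_MASK and arch_sync_kernel_mappings()
 * are the hooks the generic vmalloc code uses (see <linux/vmalloc.h>).
 */
#if 0	/* example only */
	pgtbl_mod_mask mask = 0;

	/* The p?d_alloc_track() helpers set bits as levels get populated. */
	example_map_kernel_range(start, end, prot, &mask);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
#endif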
1960 
1961 #endif /* !__ASSEMBLY__ */
1962 
1963 #if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
1964 #ifdef CONFIG_PHYS_ADDR_T_64BIT
1965 /*
1966  * ZSMALLOC needs to know the highest PFN on 32-bit architectures
1967  * with physical address space extension; otherwise it falls back
1968  * to BITS_PER_LONG.
1969  */
1970 #error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
1971 #else
1972 #define MAX_POSSIBLE_PHYSMEM_BITS 32
1973 #endif
1974 #endif
1975 
1976 #ifndef has_transparent_hugepage
1977 #define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
1978 #endif
1979 
1980 #ifndef has_transparent_pud_hugepage
1981 #define has_transparent_pud_hugepage() IS_BUILTIN(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1982 #endif
1983 /*
1984  * On some architectures it depends on the mm if the p4d/pud or pmd
1985  * layer of the page table hierarchy is folded or not.
1986  */
1987 #ifndef mm_p4d_folded
1988 #define mm_p4d_folded(mm)	__is_defined(__PAGETABLE_P4D_FOLDED)
1989 #endif
1990 
1991 #ifndef mm_pud_folded
1992 #define mm_pud_folded(mm)	__is_defined(__PAGETABLE_PUD_FOLDED)
1993 #endif
1994 
1995 #ifndef mm_pmd_folded
1996 #define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
1997 #endif
1998 
1999 #ifndef p4d_offset_lockless
2000 #define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
2001 #endif
2002 #ifndef pud_offset_lockless
2003 #define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
2004 #endif
2005 #ifndef pmd_offset_lockless
2006 #define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
2007 #endif
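/*
 * Illustrative sketch (not part of the original header): lockless walkers
 * such as GUP-fast read each upper-level entry once and hand the copy to
 * the _lockless variant, so a concurrently freed intermediate table is
 * never re-dereferenced:
 */
#if 0	/* example only */
	pgd_t pgd = READ_ONCE(*pgdp);
	p4d_t *p4dp = p4d_offset_lockless(pgdp, pgd, addr);
	p4d_t p4d = READ_ONCE(*p4dp);
	pud_t *pudp = pud_offset_lockless(p4dp, p4d, addr);
#endif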
2008 
2009 /*
2010  * pXd_leaf() is the API to check whether a pgtable entry is a huge page
2011  * mapping.  It should work globally across all archs, without any
2012  * dependency on CONFIG_* options.  For architectures that do not support
2013  * huge mappings on specific levels, the fallbacks below will be used.
2014  *
2015  * A leaf pgtable entry should always imply the following:
2016  *
2017  * - It is a "present" entry.  IOW, before using this API, please check it
2018  *   with pXd_present() first. NOTE: it may not always mean the "present
2019  *   bit" is set.  For example, PROT_NONE entries are always "present".
2020  *
2021  * - It should _never_ be a swap entry of any type.  Above "present" check
2022  *   should have guarded this, but let's be crystal clear on this.
2023  *
2024  * - It should contain a huge PFN, which points to a huge page larger than
2025  *   PAGE_SIZE of the platform.  The PFN format isn't important here.
2026  *
2027  * - It should cover all kinds of huge mappings (i.e. pXd_trans_huge()
2028  *   or hugetlb mappings).
2029  */
2030 #ifndef pgd_leaf
2031 #define pgd_leaf(x)	false
2032 #endif
2033 #ifndef p4d_leaf
2034 #define p4d_leaf(x)	false
2035 #endif
2036 #ifndef pud_leaf
2037 #define pud_leaf(x)	false
2038 #endif
2039 #ifndef pmd_leaf
2040 #define pmd_leaf(x)	false
2041 #endif
2042 
2043 #ifndef pgd_leaf_size
2044 #define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT)
2045 #endif
2046 #ifndef p4d_leaf_size
2047 #define p4d_leaf_size(x) P4D_SIZE
2048 #endif
2049 #ifndef pud_leaf_size
2050 #define pud_leaf_size(x) PUD_SIZE
2051 #endif
2052 #ifndef pmd_leaf_size
2053 #define pmd_leaf_size(x) PMD_SIZE
2054 #endif
2055 #ifndef __pte_leaf_size
2056 #ifndef pte_leaf_size
2057 #define pte_leaf_size(x) PAGE_SIZE
2058 #endif
2059 #define __pte_leaf_size(x,y) pte_leaf_size(y)
2060 #endif
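/*
 * Illustrative sketch (not part of the original header): a profiler-style
 * probe can report the mapping size of a present leaf entry with the
 * pXd_leaf()/pXd_leaf_size() pairs, e.g. at the PMD level:
 */
#if 0	/* example only */
	pmd_t pmd = READ_ONCE(*pmdp);

	if (pmd_present(pmd) && pmd_leaf(pmd))
		size = pmd_leaf_size(pmd);	/* PMD_SIZE unless overridden */
#endif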
2061 
2062 /*
2063  * We always define pmd_pfn for all archs as it's used in lots of generic
2064  * code.  The same now applies to pud_pfn (and may apply to larger
2065  * mappings in the future; we're not there yet).  Instead of requiring
2066  * every arch to define it (like pmd_pfn), provide a fallback.
2067  *
2068  * Note that returning 0 here means any arch that didn't define this can
2069  * go severely wrong when it hits a real pud leaf.  It is the arch's
2070  * responsibility to properly define it when a huge pud is possible.
2071  */
2072 #ifndef pud_pfn
2073 #define pud_pfn(x) 0
2074 #endif
2075 
2076 /*
2077  * Some architectures have MMUs that are configurable or selectable at boot
2078  * time. These lead to variable PTRS_PER_x. For statically allocated arrays it
2079  * helps to have a static maximum value.
2080  */
2081 
2082 #ifndef MAX_PTRS_PER_PTE
2083 #define MAX_PTRS_PER_PTE PTRS_PER_PTE
2084 #endif
2085 
2086 #ifndef MAX_PTRS_PER_PMD
2087 #define MAX_PTRS_PER_PMD PTRS_PER_PMD
2088 #endif
2089 
2090 #ifndef MAX_PTRS_PER_PUD
2091 #define MAX_PTRS_PER_PUD PTRS_PER_PUD
2092 #endif
2093 
2094 #ifndef MAX_PTRS_PER_P4D
2095 #define MAX_PTRS_PER_P4D PTRS_PER_P4D
2096 #endif
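/*
 * Illustrative sketch (not part of the original header): a statically
 * allocated early page table sized with the MAX_ constants stays valid
 * for the largest runtime-selectable configuration (KASAN's early shadow
 * tables are sized this way).  The array name is hypothetical.
 */
#if 0	/* example only */
static pte_t example_early_pte[MAX_PTRS_PER_PTE] __page_aligned_bss;
#endif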
2097 
2098 #ifndef pte_pgprot
2099 #define pte_pgprot(x) ((pgprot_t) {0})
2100 #endif
2101 
2102 #ifndef pmd_pgprot
2103 #define pmd_pgprot(x) ((pgprot_t) {0})
2104 #endif
2105 
2106 #ifndef pud_pgprot
2107 #define pud_pgprot(x) ((pgprot_t) {0})
2108 #endif
2109 
2110 /* Description of the effects of mapping type and prot in the current
2111  * implementation. This is due to the limited x86 page protection
2112  * hardware.  The expected behavior is in parens:
2113  *
2114  * map_type	prot
2115  *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
2116  * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
2117  *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
2118  *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
2119  *
2120  * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
2121  *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
2122  *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
2123  *
2124  * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
2125  * MAP_PRIVATE (with Enhanced PAN supported):
2126  *								r: (no) no
2127  *								w: (no) no
2128  *								x: (yes) yes
2129  */
2130 #define DECLARE_VM_GET_PAGE_PROT					\
2131 pgprot_t vm_get_page_prot(vm_flags_t vm_flags)				\
2132 {									\
2133 		return protection_map[vm_flags &			\
2134 			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];	\
2135 }									\
2136 EXPORT_SYMBOL(vm_get_page_prot);
2137 
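/*
 * Illustrative sketch (not part of the original header): an architecture
 * with no special protection-bit post-processing supplies its
 * protection_map[] and then emits the generic vm_get_page_prot() via the
 * macro above (several ports do exactly this in their mm code):
 *
 *	static const pgprot_t protection_map[16] = {
 *		[VM_NONE]	= PAGE_NONE,
 *		[VM_READ]	= PAGE_READONLY,
 *		... remaining VM_WRITE/VM_EXEC/VM_SHARED combinations ...
 *	};
 *	DECLARE_VM_GET_PAGE_PROT
 */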
2138 #endif /* _LINUX_PGTABLE_H */
2139