xref: /linux/include/linux/pgtable.h (revision beace86e61e465dba204a268ab3f3377153a4973)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_PGTABLE_H
3 #define _LINUX_PGTABLE_H
4 
5 #include <linux/pfn.h>
6 #include <asm/pgtable.h>
7 
8 #define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
9 #define PUD_ORDER	(PUD_SHIFT - PAGE_SHIFT)
10 
11 #ifndef __ASSEMBLY__
12 #ifdef CONFIG_MMU
13 
14 #include <linux/mm_types.h>
15 #include <linux/bug.h>
16 #include <linux/errno.h>
17 #include <asm-generic/pgtable_uffd.h>
18 #include <linux/page_table_check.h>
19 
20 #if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
21 	defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
22 #error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
23 #endif
24 
25 /*
26  * On almost all architectures and configurations, 0 can be used as the
27  * upper ceiling to free_pgtables(): on many architectures it has the same
28  * effect as using TASK_SIZE.  However, there is one configuration which
29  * must impose a more careful limit, to avoid freeing kernel pgtables.
30  */
31 #ifndef USER_PGTABLES_CEILING
32 #define USER_PGTABLES_CEILING	0UL
33 #endif
34 
35 /*
36  * This defines the first usable user address. Platforms
37  * can override its value with custom FIRST_USER_ADDRESS
38  * defined in their respective <asm/pgtable.h>.
39  */
40 #ifndef FIRST_USER_ADDRESS
41 #define FIRST_USER_ADDRESS	0UL
42 #endif
43 
44 /*
45  * This defines the generic helper for accessing the PMD page
46  * table page. Platforms can still override this via their
47  * respective <asm/pgtable.h>.
48  */
49 #ifndef pmd_pgtable
50 #define pmd_pgtable(pmd) pmd_page(pmd)
51 #endif
52 
53 #define pmd_folio(pmd) page_folio(pmd_page(pmd))
54 
55 /*
56  * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
57  *
58  * The pXx_index() functions return the index of the entry in the page
59  * table page which would control the given virtual address.
60  *
61  * As these functions may be used by the same code for different levels of
62  * page table folding, they are always available, regardless of the
63  * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
64  * because in such cases PTRS_PER_PxD equals 1.
65  */
66 
67 static inline unsigned long pte_index(unsigned long address)
68 {
69 	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
70 }
71 
72 #ifndef pmd_index
73 static inline unsigned long pmd_index(unsigned long address)
74 {
75 	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
76 }
77 #define pmd_index pmd_index
78 #endif
79 
80 #ifndef pud_index
81 static inline unsigned long pud_index(unsigned long address)
82 {
83 	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
84 }
85 #define pud_index pud_index
86 #endif
87 
88 #ifndef pgd_index
89 /* Must be a compile-time constant, so implement it as a macro */
90 #define pgd_index(a)  (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
91 #endif
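
/*
 * Illustrative sketch (not part of this header): on a hypothetical 4-level,
 * 4K-page configuration with 9 bits of index per level, the helpers above
 * decompose an address roughly as:
 *
 *	addr = 0x00007f1234567000UL
 *	pgd_index(addr) == (addr >> 39) & 511
 *	pud_index(addr) == (addr >> 30) & 511
 *	pmd_index(addr) == (addr >> 21) & 511
 *	pte_index(addr) == (addr >> 12) & 511
 *
 * Folded levels return index 0, because PTRS_PER_PxD is 1 there.
 */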
92 
93 #ifndef kernel_pte_init
94 static inline void kernel_pte_init(void *addr)
95 {
96 }
97 #define kernel_pte_init kernel_pte_init
98 #endif
99 
100 #ifndef pmd_init
101 static inline void pmd_init(void *addr)
102 {
103 }
104 #define pmd_init pmd_init
105 #endif
106 
107 #ifndef pud_init
108 static inline void pud_init(void *addr)
109 {
110 }
111 #define pud_init pud_init
112 #endif
113 
114 #ifndef pte_offset_kernel
115 static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
116 {
117 	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
118 }
119 #define pte_offset_kernel pte_offset_kernel
120 #endif
121 
122 #ifdef CONFIG_HIGHPTE
123 #define __pte_map(pmd, address) \
124 	((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address)))
125 #define pte_unmap(pte)	do {	\
126 	kunmap_local((pte));	\
127 	rcu_read_unlock();	\
128 } while (0)
129 #else
130 static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address)
131 {
132 	return pte_offset_kernel(pmd, address);
133 }
134 static inline void pte_unmap(pte_t *pte)
135 {
136 	rcu_read_unlock();
137 }
138 #endif
139 
140 void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
141 
142 /* Find an entry in the second-level page table. */
143 #ifndef pmd_offset
144 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
145 {
146 	return pud_pgtable(*pud) + pmd_index(address);
147 }
148 #define pmd_offset pmd_offset
149 #endif
150 
151 #ifndef pud_offset
152 static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
153 {
154 	return p4d_pgtable(*p4d) + pud_index(address);
155 }
156 #define pud_offset pud_offset
157 #endif
158 
159 static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
160 {
161 	return (pgd + pgd_index(address));
162 };
163 
164 /*
165  * a shortcut to get a pgd_t in a given mm
166  */
167 #ifndef pgd_offset
168 #define pgd_offset(mm, address)		pgd_offset_pgd((mm)->pgd, (address))
169 #endif
170 
171 /*
172  * a shortcut which implies the use of the kernel's pgd, instead
173  * of a process's
174  */
175 #define pgd_offset_k(address)		pgd_offset(&init_mm, (address))
176 
177 /*
178  * In many cases it is known that a virtual address is mapped at PMD or PTE
179  * level, so instead of traversing all the page table levels, we can get a
180  * pointer to the PMD entry in user or kernel page table or translate a virtual
181  * address to the pointer in the PTE in the kernel page tables with simple
182  * helpers.
183  */
184 static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
185 {
186 	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
187 }
188 
189 static inline pmd_t *pmd_off_k(unsigned long va)
190 {
191 	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
192 }
193 
194 static inline pte_t *virt_to_kpte(unsigned long vaddr)
195 {
196 	pmd_t *pmd = pmd_off_k(vaddr);
197 
198 	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
199 }
200 
201 #ifndef pmd_young
202 static inline int pmd_young(pmd_t pmd)
203 {
204 	return 0;
205 }
206 #endif
207 
208 #ifndef pmd_dirty
209 static inline int pmd_dirty(pmd_t pmd)
210 {
211 	return 0;
212 }
213 #endif
214 
215 /*
216  * A facility to provide lazy MMU batching.  This allows PTE updates and
217  * page invalidations to be delayed until a call to leave lazy MMU mode
218  * is issued.  Some architectures may benefit from doing this, and it is
219  * beneficial for both shadow and direct mode hypervisors, which may batch
220  * the PTE updates which happen during this window.  Note that using this
221  * interface requires that read hazards be removed from the code.  A read
222  * hazard could result in the direct mode hypervisor case, since the actual
223  * write to the page tables may not yet have taken place, so reads through
224  * a raw PTE pointer after it has been modified are not guaranteed to be
225  * up to date.
226  *
227  * In the general case, no lock is guaranteed to be held between entry and exit
228  * of the lazy mode. So the implementation must assume preemption may be enabled
229  * and CPU migration is possible; it must take steps to be robust against this.
230  * (In practice, for user PTE updates, the appropriate page table lock(s) are
231  * held, but for kernel PTE updates, no lock is held). Nesting is not permitted
232  * and the mode cannot be used in interrupt context.
233  */
234 #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
235 #define arch_enter_lazy_mmu_mode()	do {} while (0)
236 #define arch_leave_lazy_mmu_mode()	do {} while (0)
237 #define arch_flush_lazy_mmu_mode()	do {} while (0)
238 #endif
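
/*
 * Illustrative usage sketch (not taken from this file): a caller that updates
 * many user PTEs under the page table lock may bracket the loop with the lazy
 * MMU hooks so that a paravirt backend can batch the updates:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (i = 0; i < nr; i++, ptep++, addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, ptep, pte_wrprotect(ptep_get(ptep)));
 *	arch_leave_lazy_mmu_mode();
 *
 * Per the read hazard rule above, a PTE modified inside the window must not
 * be read back through the raw pointer and assumed to be up to date.
 */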
239 
240 #ifndef pte_batch_hint
241 /**
242  * pte_batch_hint - Number of pages that can be added to batch without scanning.
243  * @ptep: Page table pointer for the entry.
244  * @pte: Page table entry.
245  *
246  * Some architectures know that a set of contiguous ptes all map the same
247  * contiguous memory with the same permissions. In this case, it can provide a
248  * hint to aid pte batching without the core code needing to scan every pte.
249  *
250  * An architecture implementation may ignore the PTE accessed state. Further,
251  * the dirty state must apply atomically to all the PTEs described by the hint.
252  *
253  * May be overridden by the architecture, else pte_batch_hint is always 1.
254  */
255 static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
256 {
257 	return 1;
258 }
259 #endif
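
/*
 * Illustrative sketch (not part of this header, and ignoring the masking of
 * accessed/dirty bits a real implementation needs) of how core code can
 * consume the hint while determining how many consecutive PTEs map one folio:
 *
 *	nr = pte_batch_hint(ptep, pte);
 *	while (nr < max_nr) {
 *		expected_pte = pte_advance_pfn(pte, nr);
 *		if (!pte_same(ptep_get(ptep + nr), expected_pte))
 *			break;
 *		nr += pte_batch_hint(ptep + nr, expected_pte);
 *	}
 *
 * Without an architecture override the hint is always 1, so the loop
 * degenerates to checking every single PTE.
 */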
260 
261 #ifndef pte_advance_pfn
262 static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
263 {
264 	return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT));
265 }
266 #endif
267 
268 #define pte_next_pfn(pte) pte_advance_pfn(pte, 1)
269 
270 #ifndef set_ptes
271 /**
272  * set_ptes - Map consecutive pages to a contiguous range of addresses.
273  * @mm: Address space to map the pages into.
274  * @addr: Address to map the first page at.
275  * @ptep: Page table pointer for the first entry.
276  * @pte: Page table entry for the first page.
277  * @nr: Number of pages to map.
278  *
279  * When nr==1, the initial state of the pte may be present or not present, and
280  * the new state may be present or not present. When nr>1, the initial state of
281  * all ptes must be not present, and the new state must be present.
282  *
283  * May be overridden by the architecture, or the architecture can define
284  * set_pte() and PFN_PTE_SHIFT.
285  *
286  * Context: The caller holds the page table lock.  The pages all belong
287  * to the same folio.  The PTEs are all in the same PMD.
288  */
289 static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
290 		pte_t *ptep, pte_t pte, unsigned int nr)
291 {
292 	page_table_check_ptes_set(mm, ptep, pte, nr);
293 
294 	for (;;) {
295 		set_pte(ptep, pte);
296 		if (--nr == 0)
297 			break;
298 		ptep++;
299 		pte = pte_next_pfn(pte);
300 	}
301 }
302 #endif
303 #define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
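
/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * mapping 'nr' pages of one folio, starting at 'page', into consecutive
 * PTEs under the contract documented above:
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *
 *	... page table lock held, all nr PTEs currently not present ...
 *	set_ptes(vma->vm_mm, addr, ptep, entry, nr);
 *
 * With nr == 1 this is exactly what set_pte_at() expands to.
 */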
304 
305 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
306 extern int ptep_set_access_flags(struct vm_area_struct *vma,
307 				 unsigned long address, pte_t *ptep,
308 				 pte_t entry, int dirty);
309 #endif
310 
311 #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
312 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
313 extern int pmdp_set_access_flags(struct vm_area_struct *vma,
314 				 unsigned long address, pmd_t *pmdp,
315 				 pmd_t entry, int dirty);
316 extern int pudp_set_access_flags(struct vm_area_struct *vma,
317 				 unsigned long address, pud_t *pudp,
318 				 pud_t entry, int dirty);
319 #else
320 static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
321 					unsigned long address, pmd_t *pmdp,
322 					pmd_t entry, int dirty)
323 {
324 	BUILD_BUG();
325 	return 0;
326 }
327 static inline int pudp_set_access_flags(struct vm_area_struct *vma,
328 					unsigned long address, pud_t *pudp,
329 					pud_t entry, int dirty)
330 {
331 	BUILD_BUG();
332 	return 0;
333 }
334 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
335 #endif
336 
337 #ifndef ptep_get
338 static inline pte_t ptep_get(pte_t *ptep)
339 {
340 	return READ_ONCE(*ptep);
341 }
342 #endif
343 
344 #ifndef pmdp_get
345 static inline pmd_t pmdp_get(pmd_t *pmdp)
346 {
347 	return READ_ONCE(*pmdp);
348 }
349 #endif
350 
351 #ifndef pudp_get
352 static inline pud_t pudp_get(pud_t *pudp)
353 {
354 	return READ_ONCE(*pudp);
355 }
356 #endif
357 
358 #ifndef p4dp_get
359 static inline p4d_t p4dp_get(p4d_t *p4dp)
360 {
361 	return READ_ONCE(*p4dp);
362 }
363 #endif
364 
365 #ifndef pgdp_get
366 static inline pgd_t pgdp_get(pgd_t *pgdp)
367 {
368 	return READ_ONCE(*pgdp);
369 }
370 #endif
371 
372 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
373 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
374 					    unsigned long address,
375 					    pte_t *ptep)
376 {
377 	pte_t pte = ptep_get(ptep);
378 	int r = 1;
379 	if (!pte_young(pte))
380 		r = 0;
381 	else
382 		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
383 	return r;
384 }
385 #endif
386 
387 #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
388 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
389 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
390 					    unsigned long address,
391 					    pmd_t *pmdp)
392 {
393 	pmd_t pmd = *pmdp;
394 	int r = 1;
395 	if (!pmd_young(pmd))
396 		r = 0;
397 	else
398 		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
399 	return r;
400 }
401 #else
402 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
403 					    unsigned long address,
404 					    pmd_t *pmdp)
405 {
406 	BUILD_BUG();
407 	return 0;
408 }
409 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
410 #endif
411 
412 #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
413 int ptep_clear_flush_young(struct vm_area_struct *vma,
414 			   unsigned long address, pte_t *ptep);
415 #endif
416 
417 #ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
418 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
419 extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
420 				  unsigned long address, pmd_t *pmdp);
421 #else
422 /*
423  * Despite being relevant to THP only, this API is called from generic rmap
424  * code under PageTransHuge(), hence it needs a dummy implementation for !THP.
425  */
426 static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
427 					 unsigned long address, pmd_t *pmdp)
428 {
429 	BUILD_BUG();
430 	return 0;
431 }
432 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
433 #endif
434 
435 #ifndef arch_has_hw_nonleaf_pmd_young
436 /*
437  * Return whether the accessed bit in non-leaf PMD entries is supported on the
438  * local CPU.
439  */
440 static inline bool arch_has_hw_nonleaf_pmd_young(void)
441 {
442 	return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG);
443 }
444 #endif
445 
446 #ifndef arch_has_hw_pte_young
447 /*
448  * Return whether the accessed bit is supported on the local CPU.
449  *
450  * This stub assumes accessing through an old PTE triggers a page fault.
451  * Architectures that automatically set the access bit should override it.
452  */
453 static inline bool arch_has_hw_pte_young(void)
454 {
455 	return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG);
456 }
457 #endif
458 
459 #ifndef exec_folio_order
460 /*
461  * Returns the preferred minimum folio order for executable file-backed memory.
462  * Must be in the range [0, PMD_ORDER). Defaults to order-0.
463  */
464 static inline unsigned int exec_folio_order(void)
465 {
466 	return 0;
467 }
468 #endif
469 
470 #ifndef arch_check_zapped_pte
471 static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
472 					 pte_t pte)
473 {
474 }
475 #endif
476 
477 #ifndef arch_check_zapped_pmd
478 static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
479 					 pmd_t pmd)
480 {
481 }
482 #endif
483 
484 #ifndef arch_check_zapped_pud
485 static inline void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
486 {
487 }
488 #endif
489 
490 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
491 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
492 				       unsigned long address,
493 				       pte_t *ptep)
494 {
495 	pte_t pte = ptep_get(ptep);
496 	pte_clear(mm, address, ptep);
497 	page_table_check_pte_clear(mm, pte);
498 	return pte;
499 }
500 #endif
501 
502 #ifndef clear_young_dirty_ptes
503 /**
504  * clear_young_dirty_ptes - Mark PTEs that map consecutive pages of the
505  *		same folio as old/clean.
506  * @vma: The virtual memory area the pages are mapped into.
507  * @addr: Address the first page is mapped at.
508  * @ptep: Page table pointer for the first entry.
509  * @nr: Number of entries to mark old/clean.
510  * @flags: Flags to modify the PTE batch semantics.
511  *
512  * May be overridden by the architecture; otherwise, implemented by
513  * get_and_clear/modify/set for each pte in the range.
514  *
515  * Note that PTE bits in the PTE range besides the PFN can differ. For example,
516  * some PTEs might be write-protected.
517  *
518  * Context: The caller holds the page table lock.  The PTEs map consecutive
519  * pages that belong to the same folio.  The PTEs are all in the same PMD.
520  */
521 static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
522 					  unsigned long addr, pte_t *ptep,
523 					  unsigned int nr, cydp_t flags)
524 {
525 	pte_t pte;
526 
527 	for (;;) {
528 		if (flags == CYDP_CLEAR_YOUNG)
529 			ptep_test_and_clear_young(vma, addr, ptep);
530 		else {
531 			pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);
532 			if (flags & CYDP_CLEAR_YOUNG)
533 				pte = pte_mkold(pte);
534 			if (flags & CYDP_CLEAR_DIRTY)
535 				pte = pte_mkclean(pte);
536 			set_pte_at(vma->vm_mm, addr, ptep, pte);
537 		}
538 		if (--nr == 0)
539 			break;
540 		ptep++;
541 		addr += PAGE_SIZE;
542 	}
543 }
544 #endif
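
/*
 * Illustrative sketch (not from this file): an MADV_FREE-style caller that
 * wants a whole folio's PTE batch marked both old and clean in one call:
 *
 *	clear_young_dirty_ptes(vma, addr, ptep, nr,
 *			       CYDP_CLEAR_YOUNG | CYDP_CLEAR_DIRTY);
 *
 * Passing only CYDP_CLEAR_YOUNG takes the cheaper
 * ptep_test_and_clear_young() path in the generic version above.
 */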
545 
546 static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
547 			      pte_t *ptep)
548 {
549 	pte_t pte = ptep_get(ptep);
550 
551 	pte_clear(mm, addr, ptep);
552 	/*
553 	 * No need for ptep_get_and_clear(): page table check doesn't care about
554 	 * any bits that could have been set by HW concurrently.
555 	 */
556 	page_table_check_pte_clear(mm, pte);
557 }
558 
559 #ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
560 /*
561  * For walking the pagetables without holding any locks.  Some architectures
562  * (eg x86-32 PAE) cannot load the entries atomically without using expensive
563  * instructions.  We are guaranteed that a PTE will only either go from not
564  * present to present, or present to not present -- it will not switch to a
565  * completely different present page without a TLB flush in between, which we
566  * are blocking by holding interrupts off.
567  *
568  * Setting ptes from not present to present goes:
569  *
570  *   ptep->pte_high = h;
571  *   smp_wmb();
572  *   ptep->pte_low = l;
573  *
574  * And present to not present goes:
575  *
576  *   ptep->pte_low = 0;
577  *   smp_wmb();
578  *   ptep->pte_high = 0;
579  *
580  * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
581  * We load pte_high *after* loading pte_low, which ensures we don't see an older
582  * value of pte_high.  *Then* we recheck pte_low, which ensures that we haven't
583  * picked up a changed pte high. We might have gotten rubbish values from
584  * pte_low and pte_high, but we are guaranteed that pte_low will not have the
585  * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
586  * operates on present ptes we're safe.
587  */
588 static inline pte_t ptep_get_lockless(pte_t *ptep)
589 {
590 	pte_t pte;
591 
592 	do {
593 		pte.pte_low = ptep->pte_low;
594 		smp_rmb();
595 		pte.pte_high = ptep->pte_high;
596 		smp_rmb();
597 	} while (unlikely(pte.pte_low != ptep->pte_low));
598 
599 	return pte;
600 }
601 #define ptep_get_lockless ptep_get_lockless
602 
603 #if CONFIG_PGTABLE_LEVELS > 2
604 static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
605 {
606 	pmd_t pmd;
607 
608 	do {
609 		pmd.pmd_low = pmdp->pmd_low;
610 		smp_rmb();
611 		pmd.pmd_high = pmdp->pmd_high;
612 		smp_rmb();
613 	} while (unlikely(pmd.pmd_low != pmdp->pmd_low));
614 
615 	return pmd;
616 }
617 #define pmdp_get_lockless pmdp_get_lockless
618 #define pmdp_get_lockless_sync() tlb_remove_table_sync_one()
619 #endif /* CONFIG_PGTABLE_LEVELS > 2 */
620 #endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */
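
/*
 * Illustrative sketch (not from this file): a GUP-fast style walker relies on
 * the guarantees described above by keeping interrupts disabled around the
 * lockless read:
 *
 *	local_irq_save(flags);
 *	pte = ptep_get_lockless(ptep);
 *	if (pte_present(pte) && pte_access_permitted(pte, write))
 *		... try to take a reference on the page ...
 *	local_irq_restore(flags);
 *
 * Disabling interrupts is what blocks the TLB flush (and hence the page table
 * teardown) that the correctness argument above depends on.
 */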
621 
622 /*
623  * We require that the PTE can be read atomically.
624  */
625 #ifndef ptep_get_lockless
626 static inline pte_t ptep_get_lockless(pte_t *ptep)
627 {
628 	return ptep_get(ptep);
629 }
630 #endif
631 
632 #ifndef pmdp_get_lockless
633 static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
634 {
635 	return pmdp_get(pmdp);
636 }
637 static inline void pmdp_get_lockless_sync(void)
638 {
639 }
640 #endif
641 
642 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
643 #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
644 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
645 					    unsigned long address,
646 					    pmd_t *pmdp)
647 {
648 	pmd_t pmd = *pmdp;
649 
650 	pmd_clear(pmdp);
651 	page_table_check_pmd_clear(mm, pmd);
652 
653 	return pmd;
654 }
655 #endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
656 #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
657 static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
658 					    unsigned long address,
659 					    pud_t *pudp)
660 {
661 	pud_t pud = *pudp;
662 
663 	pud_clear(pudp);
664 	page_table_check_pud_clear(mm, pud);
665 
666 	return pud;
667 }
668 #endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
669 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
670 
671 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
672 #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
673 static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
674 					    unsigned long address, pmd_t *pmdp,
675 					    int full)
676 {
677 	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
678 }
679 #endif
680 
681 #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
682 static inline pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
683 					    unsigned long address, pud_t *pudp,
684 					    int full)
685 {
686 	return pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
687 }
688 #endif
689 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
690 
691 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
692 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
693 					    unsigned long address, pte_t *ptep,
694 					    int full)
695 {
696 	return ptep_get_and_clear(mm, address, ptep);
697 }
698 #endif
699 
700 #ifndef get_and_clear_full_ptes
701 /**
702  * get_and_clear_full_ptes - Clear present PTEs that map consecutive pages of
703  *			     the same folio, collecting dirty/accessed bits.
704  * @mm: Address space the pages are mapped into.
705  * @addr: Address the first page is mapped at.
706  * @ptep: Page table pointer for the first entry.
707  * @nr: Number of entries to clear.
708  * @full: Whether we are clearing a full mm.
709  *
710  * May be overridden by the architecture; otherwise, implemented as a simple
711  * loop over ptep_get_and_clear_full(), merging dirty/accessed bits into the
712  * returned PTE.
713  *
714  * Note that PTE bits in the PTE range besides the PFN can differ. For example,
715  * some PTEs might be write-protected.
716  *
717  * Context: The caller holds the page table lock.  The PTEs map consecutive
718  * pages that belong to the same folio.  The PTEs are all in the same PMD.
719  */
720 static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
721 		unsigned long addr, pte_t *ptep, unsigned int nr, int full)
722 {
723 	pte_t pte, tmp_pte;
724 
725 	pte = ptep_get_and_clear_full(mm, addr, ptep, full);
726 	while (--nr) {
727 		ptep++;
728 		addr += PAGE_SIZE;
729 		tmp_pte = ptep_get_and_clear_full(mm, addr, ptep, full);
730 		if (pte_dirty(tmp_pte))
731 			pte = pte_mkdirty(pte);
732 		if (pte_young(tmp_pte))
733 			pte = pte_mkyoung(pte);
734 	}
735 	return pte;
736 }
737 #endif
738 
739 #ifndef clear_full_ptes
740 /**
741  * clear_full_ptes - Clear present PTEs that map consecutive pages of the same
742  *		     folio.
743  * @mm: Address space the pages are mapped into.
744  * @addr: Address the first page is mapped at.
745  * @ptep: Page table pointer for the first entry.
746  * @nr: Number of entries to clear.
747  * @full: Whether we are clearing a full mm.
748  *
749  * May be overridden by the architecture; otherwise, implemented as a simple
750  * loop over ptep_get_and_clear_full().
751  *
752  * Note that PTE bits in the PTE range besides the PFN can differ. For example,
753  * some PTEs might be write-protected.
754  *
755  * Context: The caller holds the page table lock.  The PTEs map consecutive
756  * pages that belong to the same folio.  The PTEs are all in the same PMD.
757  */
758 static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
759 		pte_t *ptep, unsigned int nr, int full)
760 {
761 	for (;;) {
762 		ptep_get_and_clear_full(mm, addr, ptep, full);
763 		if (--nr == 0)
764 			break;
765 		ptep++;
766 		addr += PAGE_SIZE;
767 	}
768 }
769 #endif
770 
771 /*
772  * If two threads concurrently fault at the same page, the thread that
773  * won the race updates the PTE and its local TLB/cache. The other thread
774  * gives up, simply does nothing, and continues; on architectures where
775  * software can update the TLB, the local TLB can be updated here to avoid
776  * the next page fault. This function updates the TLB only; it does nothing
777  * with the cache, which is what distinguishes it from update_mmu_cache().
778  */
779 #ifndef update_mmu_tlb_range
780 static inline void update_mmu_tlb_range(struct vm_area_struct *vma,
781 				unsigned long address, pte_t *ptep, unsigned int nr)
782 {
783 }
784 #endif
785 
786 static inline void update_mmu_tlb(struct vm_area_struct *vma,
787 				unsigned long address, pte_t *ptep)
788 {
789 	update_mmu_tlb_range(vma, address, ptep, 1);
790 }
791 
792 /*
793  * Some architectures may be able to avoid expensive synchronization
794  * primitives when modifications are made to PTEs which are already
795  * not present, or during address space destruction.
796  */
797 #ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
798 static inline void pte_clear_not_present_full(struct mm_struct *mm,
799 					      unsigned long address,
800 					      pte_t *ptep,
801 					      int full)
802 {
803 	pte_clear(mm, address, ptep);
804 }
805 #endif
806 
807 #ifndef clear_not_present_full_ptes
808 /**
809  * clear_not_present_full_ptes - Clear multiple not present PTEs which are
810  *				 consecutive in the pgtable.
811  * @mm: Address space the ptes represent.
812  * @addr: Address of the first pte.
813  * @ptep: Page table pointer for the first entry.
814  * @nr: Number of entries to clear.
815  * @full: Whether we are clearing a full mm.
816  *
817  * May be overridden by the architecture; otherwise, implemented as a simple
818  * loop over pte_clear_not_present_full().
819  *
820  * Context: The caller holds the page table lock.  The PTEs are all not present.
821  * The PTEs are all in the same PMD.
822  */
823 static inline void clear_not_present_full_ptes(struct mm_struct *mm,
824 		unsigned long addr, pte_t *ptep, unsigned int nr, int full)
825 {
826 	for (;;) {
827 		pte_clear_not_present_full(mm, addr, ptep, full);
828 		if (--nr == 0)
829 			break;
830 		ptep++;
831 		addr += PAGE_SIZE;
832 	}
833 }
834 #endif
835 
836 #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
837 extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
838 			      unsigned long address,
839 			      pte_t *ptep);
840 #endif
841 
842 #ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
843 extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
844 			      unsigned long address,
845 			      pmd_t *pmdp);
846 extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
847 			      unsigned long address,
848 			      pud_t *pudp);
849 #endif
850 
851 #ifndef pte_mkwrite
852 static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
853 {
854 	return pte_mkwrite_novma(pte);
855 }
856 #endif
857 
858 #if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite)
859 static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
860 {
861 	return pmd_mkwrite_novma(pmd);
862 }
863 #endif
864 
865 #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
866 struct mm_struct;
867 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
868 {
869 	pte_t old_pte = ptep_get(ptep);
870 	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
871 }
872 #endif
873 
874 #ifndef wrprotect_ptes
875 /**
876  * wrprotect_ptes - Write-protect PTEs that map consecutive pages of the same
877  *		    folio.
878  * @mm: Address space the pages are mapped into.
879  * @addr: Address the first page is mapped at.
880  * @ptep: Page table pointer for the first entry.
881  * @nr: Number of entries to write-protect.
882  *
883  * May be overridden by the architecture; otherwise, implemented as a simple
884  * loop over ptep_set_wrprotect().
885  *
886  * Note that PTE bits in the PTE range besides the PFN can differ. For example,
887  * some PTEs might be write-protected.
888  *
889  * Context: The caller holds the page table lock.  The PTEs map consecutive
890  * pages that belong to the same folio.  The PTEs are all in the same PMD.
891  */
892 static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
893 		pte_t *ptep, unsigned int nr)
894 {
895 	for (;;) {
896 		ptep_set_wrprotect(mm, addr, ptep);
897 		if (--nr == 0)
898 			break;
899 		ptep++;
900 		addr += PAGE_SIZE;
901 	}
902 }
903 #endif
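
/*
 * Illustrative sketch (hypothetical fork-style copy path, not from this
 * file): once 'nr' consecutive source PTEs are known to map one folio, the
 * whole batch can be write-protected at once before being copied:
 *
 *	wrprotect_ptes(src_mm, addr, src_ptep, nr);
 *	set_ptes(dst_mm, addr, dst_ptep, pte_wrprotect(pte), nr);
 *
 * Only wrprotect_ptes() and set_ptes() are defined in this header; the other
 * names are placeholders.
 */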
904 
905 /*
906  * On some architectures the hardware does not set the page accessed bit when
907  * a page is accessed; it is the responsibility of software to set this bit,
908  * which incurs an extra page fault penalty to track it. As an optimization,
909  * the accessed bit can be set on every page fault on these architectures.
910  * To differentiate it from the pte_mkyoung macro, this macro is used on
911  * platforms where software maintains the page accessed bit.
912  */
913 #ifndef pte_sw_mkyoung
914 static inline pte_t pte_sw_mkyoung(pte_t pte)
915 {
916 	return pte;
917 }
918 #define pte_sw_mkyoung	pte_sw_mkyoung
919 #endif
920 
921 #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
922 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
923 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
924 				      unsigned long address, pmd_t *pmdp)
925 {
926 	pmd_t old_pmd = *pmdp;
927 	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
928 }
929 #else
930 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
931 				      unsigned long address, pmd_t *pmdp)
932 {
933 	BUILD_BUG();
934 }
935 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
936 #endif
937 #ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
938 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
939 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
940 static inline void pudp_set_wrprotect(struct mm_struct *mm,
941 				      unsigned long address, pud_t *pudp)
942 {
943 	pud_t old_pud = *pudp;
944 
945 	set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
946 }
947 #else
948 static inline void pudp_set_wrprotect(struct mm_struct *mm,
949 				      unsigned long address, pud_t *pudp)
950 {
951 	BUILD_BUG();
952 }
953 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
954 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
955 #endif
956 
957 #ifndef pmdp_collapse_flush
958 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
959 extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
960 				 unsigned long address, pmd_t *pmdp);
961 #else
962 static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
963 					unsigned long address,
964 					pmd_t *pmdp)
965 {
966 	BUILD_BUG();
967 	return *pmdp;
968 }
969 #define pmdp_collapse_flush pmdp_collapse_flush
970 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
971 #endif
972 
973 #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
974 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
975 				       pgtable_t pgtable);
976 #endif
977 
978 #ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
979 extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
980 #endif
981 
982 #ifndef arch_needs_pgtable_deposit
983 #define arch_needs_pgtable_deposit() (false)
984 #endif
985 
986 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
987 /*
988  * This is an implementation of pmdp_establish() that is only suitable for an
989  * architecture that doesn't have hardware dirty/accessed bits. In this case we
990  * can't race with a CPU setting these bits, so a non-atomic approach is fine.
991  */
992 static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
993 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
994 {
995 	pmd_t old_pmd = *pmdp;
996 	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
997 	return old_pmd;
998 }
999 #endif
1000 
1001 #ifndef __HAVE_ARCH_PMDP_INVALIDATE
1002 extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
1003 			    pmd_t *pmdp);
1004 #endif
1005 
1006 #ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
1007 
1008 /*
1009  * pmdp_invalidate_ad() invalidates the PMD while changing a transparent
1010  * hugepage mapping in the page tables. This function is similar to
1011  * pmdp_invalidate(), but should only be used if the access and dirty bits would
1012  * not be cleared by the software in the new PMD value. The function ensures
1013  * that hardware updates of the access and dirty bits are not lost.
1014  *
1015  * Doing so allows certain architectures to avoid a TLB flush in most
1016  * cases. A TLB flush might still be necessary later if the PMD update
1017  * itself requires one (e.g., if protection was made stricter). However,
1018  * even when a TLB flush is needed because of the update, the caller may be able
1019  * to batch these TLB flushing operations, so fewer TLB flush operations are
1020  * needed.
1021  */
1022 extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
1023 				unsigned long address, pmd_t *pmdp);
1024 #endif
1025 
1026 #ifndef __HAVE_ARCH_PTE_SAME
1027 static inline int pte_same(pte_t pte_a, pte_t pte_b)
1028 {
1029 	return pte_val(pte_a) == pte_val(pte_b);
1030 }
1031 #endif
1032 
1033 #ifndef __HAVE_ARCH_PTE_UNUSED
1034 /*
1035  * Some architectures provide facilities to virtualization guests
1036  * so that they can flag allocated pages as unused. This allows the
1037  * host to transparently reclaim unused pages. This function returns
1038  * whether the pte's page is unused.
1039  */
1040 static inline int pte_unused(pte_t pte)
1041 {
1042 	return 0;
1043 }
1044 #endif
1045 
1046 #ifndef pte_access_permitted
1047 #define pte_access_permitted(pte, write) \
1048 	(pte_present(pte) && (!(write) || pte_write(pte)))
1049 #endif
1050 
1051 #ifndef pmd_access_permitted
1052 #define pmd_access_permitted(pmd, write) \
1053 	(pmd_present(pmd) && (!(write) || pmd_write(pmd)))
1054 #endif
1055 
1056 #ifndef pud_access_permitted
1057 #define pud_access_permitted(pud, write) \
1058 	(pud_present(pud) && (!(write) || pud_write(pud)))
1059 #endif
1060 
1061 #ifndef p4d_access_permitted
1062 #define p4d_access_permitted(p4d, write) \
1063 	(p4d_present(p4d) && (!(write) || p4d_write(p4d)))
1064 #endif
1065 
1066 #ifndef pgd_access_permitted
1067 #define pgd_access_permitted(pgd, write) \
1068 	(pgd_present(pgd) && (!(write) || pgd_write(pgd)))
1069 #endif
1070 
1071 #ifndef __HAVE_ARCH_PMD_SAME
1072 static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
1073 {
1074 	return pmd_val(pmd_a) == pmd_val(pmd_b);
1075 }
1076 #endif
1077 
1078 #ifndef pud_same
1079 static inline int pud_same(pud_t pud_a, pud_t pud_b)
1080 {
1081 	return pud_val(pud_a) == pud_val(pud_b);
1082 }
1083 #define pud_same pud_same
1084 #endif
1085 
1086 #ifndef __HAVE_ARCH_P4D_SAME
1087 static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
1088 {
1089 	return p4d_val(p4d_a) == p4d_val(p4d_b);
1090 }
1091 #endif
1092 
1093 #ifndef __HAVE_ARCH_PGD_SAME
1094 static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
1095 {
1096 	return pgd_val(pgd_a) == pgd_val(pgd_b);
1097 }
1098 #endif
1099 
1100 #ifndef __HAVE_ARCH_DO_SWAP_PAGE
1101 static inline void arch_do_swap_page_nr(struct mm_struct *mm,
1102 				     struct vm_area_struct *vma,
1103 				     unsigned long addr,
1104 				     pte_t pte, pte_t oldpte,
1105 				     int nr)
1106 {
1107 
1108 }
1109 #else
1110 /*
1111  * Some architectures support metadata associated with a page. When a
1112  * page is being swapped out, this metadata must be saved so it can be
1113  * restored when the page is swapped back in. SPARC M7 and newer
1114  * processors support an ADI (Application Data Integrity) tag for the
1115  * page as metadata for the page. arch_do_swap_page() can restore this
1116  * metadata when a page is swapped back in.
1117  */
1118 static inline void arch_do_swap_page_nr(struct mm_struct *mm,
1119 					struct vm_area_struct *vma,
1120 					unsigned long addr,
1121 					pte_t pte, pte_t oldpte,
1122 					int nr)
1123 {
1124 	for (int i = 0; i < nr; i++) {
1125 		arch_do_swap_page(vma->vm_mm, vma, addr + i * PAGE_SIZE,
1126 				pte_advance_pfn(pte, i),
1127 				pte_advance_pfn(oldpte, i));
1128 	}
1129 }
1130 #endif
1131 
1132 #ifndef __HAVE_ARCH_UNMAP_ONE
1133 /*
1134  * Some architectures support metadata associated with a page. When a
1135  * page is being swapped out, this metadata must be saved so it can be
1136  * restored when the page is swapped back in. SPARC M7 and newer
1137  * processors support an ADI (Application Data Integrity) tag for the
1138  * page as metadata for the page. arch_unmap_one() can save this
1139  * metadata on a swap-out of a page.
1140  */
1141 static inline int arch_unmap_one(struct mm_struct *mm,
1142 				  struct vm_area_struct *vma,
1143 				  unsigned long addr,
1144 				  pte_t orig_pte)
1145 {
1146 	return 0;
1147 }
1148 #endif
1149 
1150 /*
1151  * Allow architectures to preserve additional metadata associated with
1152  * swapped-out pages. The corresponding __HAVE_ARCH_SWAP_* macros and function
1153  * prototypes must be defined in the arch-specific asm/pgtable.h file.
1154  */
1155 #ifndef __HAVE_ARCH_PREPARE_TO_SWAP
1156 static inline int arch_prepare_to_swap(struct folio *folio)
1157 {
1158 	return 0;
1159 }
1160 #endif
1161 
1162 #ifndef __HAVE_ARCH_SWAP_INVALIDATE
1163 static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
1164 {
1165 }
1166 
1167 static inline void arch_swap_invalidate_area(int type)
1168 {
1169 }
1170 #endif
1171 
1172 #ifndef __HAVE_ARCH_SWAP_RESTORE
1173 static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
1174 {
1175 }
1176 #endif
1177 
1178 #ifndef __HAVE_ARCH_MOVE_PTE
1179 #define move_pte(pte, old_addr, new_addr)	(pte)
1180 #endif
1181 
1182 #ifndef pte_accessible
1183 # define pte_accessible(mm, pte)	((void)(pte), 1)
1184 #endif
1185 
1186 #ifndef flush_tlb_fix_spurious_fault
1187 #define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
1188 #endif
1189 
1190 /*
1191  * When walking page tables, get the address of the next boundary,
1192  * or the end address of the range if that comes earlier.  Although no
1193  * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
1194  */
1195 
1196 #define pgd_addr_end(addr, end)						\
1197 ({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
1198 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
1199 })
1200 
1201 #ifndef p4d_addr_end
1202 #define p4d_addr_end(addr, end)						\
1203 ({	unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK;	\
1204 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
1205 })
1206 #endif
1207 
1208 #ifndef pud_addr_end
1209 #define pud_addr_end(addr, end)						\
1210 ({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
1211 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
1212 })
1213 #endif
1214 
1215 #ifndef pmd_addr_end
1216 #define pmd_addr_end(addr, end)						\
1217 ({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
1218 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
1219 })
1220 #endif
1221 
1222 /*
1223  * When walking page tables, we usually want to skip any p?d_none entries;
1224  * and any p?d_bad entries - reporting the error before resetting to none.
1225  * Do the tests inline, but report and clear the bad entry in mm/memory.c.
1226  */
1227 void pgd_clear_bad(pgd_t *);
1228 
1229 #ifndef __PAGETABLE_P4D_FOLDED
1230 void p4d_clear_bad(p4d_t *);
1231 #else
1232 #define p4d_clear_bad(p4d)        do { } while (0)
1233 #endif
1234 
1235 #ifndef __PAGETABLE_PUD_FOLDED
1236 void pud_clear_bad(pud_t *);
1237 #else
1238 #define pud_clear_bad(pud)        do { } while (0)
1239 #endif
1240 
1241 void pmd_clear_bad(pmd_t *);
1242 
1243 static inline int pgd_none_or_clear_bad(pgd_t *pgd)
1244 {
1245 	if (pgd_none(*pgd))
1246 		return 1;
1247 	if (unlikely(pgd_bad(*pgd))) {
1248 		pgd_clear_bad(pgd);
1249 		return 1;
1250 	}
1251 	return 0;
1252 }
1253 
1254 static inline int p4d_none_or_clear_bad(p4d_t *p4d)
1255 {
1256 	if (p4d_none(*p4d))
1257 		return 1;
1258 	if (unlikely(p4d_bad(*p4d))) {
1259 		p4d_clear_bad(p4d);
1260 		return 1;
1261 	}
1262 	return 0;
1263 }
1264 
1265 static inline int pud_none_or_clear_bad(pud_t *pud)
1266 {
1267 	if (pud_none(*pud))
1268 		return 1;
1269 	if (unlikely(pud_bad(*pud))) {
1270 		pud_clear_bad(pud);
1271 		return 1;
1272 	}
1273 	return 0;
1274 }
1275 
1276 static inline int pmd_none_or_clear_bad(pmd_t *pmd)
1277 {
1278 	if (pmd_none(*pmd))
1279 		return 1;
1280 	if (unlikely(pmd_bad(*pmd))) {
1281 		pmd_clear_bad(pmd);
1282 		return 1;
1283 	}
1284 	return 0;
1285 }
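
/*
 * Illustrative sketch (not from this file) of the canonical walk pattern the
 * pXd_addr_end() and pXd_none_or_clear_bad() helpers are designed for, shown
 * here at the PMD level:
 *
 *	pmd = pmd_offset(pud, addr);
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		... process the PTE level for [addr, next) ...
 *	} while (pmd++, addr = next, addr != end);
 */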
1286 
1287 static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
1288 					     unsigned long addr,
1289 					     pte_t *ptep)
1290 {
1291 	/*
1292 	 * Get the current pte state, but zero it out to make it
1293 	 * non-present, preventing the hardware from asynchronously
1294 	 * updating it.
1295 	 */
1296 	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
1297 }
1298 
1299 static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
1300 					     unsigned long addr,
1301 					     pte_t *ptep, pte_t pte)
1302 {
1303 	/*
1304 	 * The pte is non-present, so there's no hardware state to
1305 	 * preserve.
1306 	 */
1307 	set_pte_at(vma->vm_mm, addr, ptep, pte);
1308 }
1309 
1310 #ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1311 /*
1312  * Start a pte protection read-modify-write transaction, which
1313  * protects against asynchronous hardware modifications to the pte.
1314  * The intention is not to prevent the hardware from making pte
1315  * updates, but to prevent any updates it may make from being lost.
1316  *
1317  * This does not protect against other software modifications of the
1318  * pte; the appropriate pte lock must be held over the transaction.
1319  *
1320  * Note that this interface is intended to be batchable, meaning that
1321  * ptep_modify_prot_commit may not actually update the pte, but merely
1322  * queue the update to be done at some later time.  The update must be
1323  * actually committed before the pte lock is released, however.
1324  */
1325 static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
1326 					   unsigned long addr,
1327 					   pte_t *ptep)
1328 {
1329 	return __ptep_modify_prot_start(vma, addr, ptep);
1330 }
1331 
1332 /*
1333  * Commit an update to a pte, leaving any hardware-controlled bits in
1334  * the PTE unmodified. The pte returned from ptep_modify_prot_start() may
1335  * additionally have young and/or dirty bits set where previously they were not,
1336  * so the updated pte may have these additional changes.
1337  */
1338 static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
1339 					   unsigned long addr,
1340 					   pte_t *ptep, pte_t old_pte, pte_t pte)
1341 {
1342 	__ptep_modify_prot_commit(vma, addr, ptep, pte);
1343 }
1344 #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
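
/*
 * Illustrative sketch (not from this file): a change_protection()-style
 * caller uses the transaction like this, with the PTE lock held:
 *
 *	oldpte = ptep_modify_prot_start(vma, addr, ptep);
 *	ptent = pte_modify(oldpte, newprot);
 *	ptep_modify_prot_commit(vma, addr, ptep, oldpte, ptent);
 *
 * pte_modify() carries the accessed/dirty bits of 'oldpte' into the value
 * that is committed, so hardware updates made before the start() are kept.
 */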
1345 
1346 /**
1347  * modify_prot_start_ptes - Start a pte protection read-modify-write transaction
1348  * over a batch of ptes, which protects against asynchronous hardware
1349  * modifications to the ptes. The intention is not to prevent the hardware from
1350  * making pte updates, but to prevent any updates it may make from being lost.
1351  * Please see the comment above ptep_modify_prot_start() for full description.
1352  *
1353  * @vma: The virtual memory area the pages are mapped into.
1354  * @addr: Address the first page is mapped at.
1355  * @ptep: Page table pointer for the first entry.
1356  * @nr: Number of entries.
1357  *
1358  * May be overridden by the architecture; otherwise, implemented as a simple
1359  * loop over ptep_modify_prot_start(), collecting the a/d bits from each pte
1360  * in the batch.
1361  *
1362  * Note that PTE bits in the PTE batch besides the PFN can differ.
1363  *
1364  * Context: The caller holds the page table lock.  The PTEs map consecutive
1365  * pages that belong to the same folio. All other PTE bits must be identical for
1366  * all PTEs in the batch except for young and dirty bits.  The PTEs are all in
1367  * the same PMD.
1368  */
1369 #ifndef modify_prot_start_ptes
1370 static inline pte_t modify_prot_start_ptes(struct vm_area_struct *vma,
1371 		unsigned long addr, pte_t *ptep, unsigned int nr)
1372 {
1373 	pte_t pte, tmp_pte;
1374 
1375 	pte = ptep_modify_prot_start(vma, addr, ptep);
1376 	while (--nr) {
1377 		ptep++;
1378 		addr += PAGE_SIZE;
1379 		tmp_pte = ptep_modify_prot_start(vma, addr, ptep);
1380 		if (pte_dirty(tmp_pte))
1381 			pte = pte_mkdirty(pte);
1382 		if (pte_young(tmp_pte))
1383 			pte = pte_mkyoung(pte);
1384 	}
1385 	return pte;
1386 }
1387 #endif
1388 
1389 /**
1390  * modify_prot_commit_ptes - Commit an update to a batch of ptes, leaving any
1391  * hardware-controlled bits in the PTE unmodified.
1392  *
1393  * @vma: The virtual memory area the pages are mapped into.
1394  * @addr: Address the first page is mapped at.
1395  * @ptep: Page table pointer for the first entry.
1396  * @old_pte: Old page table entry (for the first entry) which is now cleared.
1397  * @pte: New page table entry to be set.
1398  * @nr: Number of entries.
1399  *
1400  * May be overridden by the architecture; otherwise, implemented as a simple
1401  * loop over ptep_modify_prot_commit().
1402  *
1403  * Context: The caller holds the page table lock. The PTEs are all in the same
1404  * PMD. On exit, the set ptes in the batch map the same folio. The ptes set by
1405  * ptep_modify_prot_start() may additionally have young and/or dirty bits set
1406  * where previously they were not, so the updated ptes may have these
1407  * additional changes.
1408  */
1409 #ifndef modify_prot_commit_ptes
1410 static inline void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
1411 		pte_t *ptep, pte_t old_pte, pte_t pte, unsigned int nr)
1412 {
1413 	int i;
1414 
1415 	for (i = 0; i < nr; ++i, ++ptep, addr += PAGE_SIZE) {
1416 		ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
1417 
1418 		/* Advance PFN only, set same prot */
1419 		old_pte = pte_next_pfn(old_pte);
1420 		pte = pte_next_pfn(pte);
1421 	}
1422 }
1423 #endif
1424 
1425 #endif /* CONFIG_MMU */
1426 
1427 /*
1428  * No-op macros that just return the current protection value. Defined here
1429  * because these macros can be used even if CONFIG_MMU is not defined.
1430  */
1431 
1432 #ifndef pgprot_nx
1433 #define pgprot_nx(prot)	(prot)
1434 #endif
1435 
1436 #ifndef pgprot_noncached
1437 #define pgprot_noncached(prot)	(prot)
1438 #endif
1439 
1440 #ifndef pgprot_writecombine
1441 #define pgprot_writecombine pgprot_noncached
1442 #endif
1443 
1444 #ifndef pgprot_writethrough
1445 #define pgprot_writethrough pgprot_noncached
1446 #endif
1447 
1448 #ifndef pgprot_device
1449 #define pgprot_device pgprot_noncached
1450 #endif
1451 
1452 #ifndef pgprot_mhp
1453 #define pgprot_mhp(prot)	(prot)
1454 #endif
1455 
1456 #ifdef CONFIG_MMU
1457 #ifndef pgprot_modify
1458 #define pgprot_modify pgprot_modify
1459 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
1460 {
1461 	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
1462 		newprot = pgprot_noncached(newprot);
1463 	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
1464 		newprot = pgprot_writecombine(newprot);
1465 	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
1466 		newprot = pgprot_device(newprot);
1467 	return newprot;
1468 }
1469 #endif
1470 #endif /* CONFIG_MMU */
1471 
1472 #ifndef pgprot_encrypted
1473 #define pgprot_encrypted(prot)	(prot)
1474 #endif
1475 
1476 #ifndef pgprot_decrypted
1477 #define pgprot_decrypted(prot)	(prot)
1478 #endif
1479 
1480 /*
1481  * A facility to provide batching of the reload of page tables and
1482  * other process state with the actual context switch code for
1483  * paravirtualized guests.  By convention, only one of the batched
1484  * update (lazy) modes (CPU, MMU) should be active at any given time,
1485  * entry should never be nested, and entry and exits should always be
1486  * paired.  This is for sanity of maintaining and reasoning about the
1487  * kernel code.  In this case, the exit (end of the context switch) is
1488  * in architecture-specific code, and so doesn't need a generic
1489  * definition.
1490  */
1491 #ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
1492 #define arch_start_context_switch(prev)	do {} while (0)
1493 #endif
1494 
1495 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
1496 #ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
1497 static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
1498 {
1499 	return pmd;
1500 }
1501 
1502 static inline int pmd_swp_soft_dirty(pmd_t pmd)
1503 {
1504 	return 0;
1505 }
1506 
1507 static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
1508 {
1509 	return pmd;
1510 }
1511 #endif
1512 #else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
1513 static inline int pte_soft_dirty(pte_t pte)
1514 {
1515 	return 0;
1516 }
1517 
1518 static inline int pmd_soft_dirty(pmd_t pmd)
1519 {
1520 	return 0;
1521 }
1522 
1523 static inline pte_t pte_mksoft_dirty(pte_t pte)
1524 {
1525 	return pte;
1526 }
1527 
1528 static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
1529 {
1530 	return pmd;
1531 }
1532 
1533 static inline pte_t pte_clear_soft_dirty(pte_t pte)
1534 {
1535 	return pte;
1536 }
1537 
1538 static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
1539 {
1540 	return pmd;
1541 }
1542 
1543 static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
1544 {
1545 	return pte;
1546 }
1547 
1548 static inline int pte_swp_soft_dirty(pte_t pte)
1549 {
1550 	return 0;
1551 }
1552 
1553 static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
1554 {
1555 	return pte;
1556 }
1557 
1558 static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
1559 {
1560 	return pmd;
1561 }
1562 
1563 static inline int pmd_swp_soft_dirty(pmd_t pmd)
1564 {
1565 	return 0;
1566 }
1567 
1568 static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
1569 {
1570 	return pmd;
1571 }
1572 #endif
1573 
1574 #ifndef __HAVE_PFNMAP_TRACKING
1575 /*
1576  * Interfaces that can be used by architecture code to keep track of
1577  * the memory type of pfn mappings specified by remap_pfn_range() and
1578  * vmf_insert_pfn().
1579  */
1580 
1581 static inline int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
1582 		pgprot_t *prot)
1583 {
1584 	return 0;
1585 }
1586 
1587 static inline int pfnmap_track(unsigned long pfn, unsigned long size,
1588 		pgprot_t *prot)
1589 {
1590 	return 0;
1591 }
1592 
1593 static inline void pfnmap_untrack(unsigned long pfn, unsigned long size)
1594 {
1595 }
1596 #else
1597 /**
1598  * pfnmap_setup_cachemode - setup the cachemode in the pgprot for a pfn range
1599  * @pfn: the start of the pfn range
1600  * @size: the size of the pfn range in bytes
1601  * @prot: the pgprot to modify
1602  *
1603  * Lookup the cachemode for the pfn range starting at @pfn with the size
1604  * @size and store it in @prot, leaving other data in @prot unchanged.
1605  *
1606  * This allows for a hardware implementation to have fine-grained control of
1607  * memory cache behavior at page level granularity. Without a hardware
1608  * implementation, this function does nothing.
1609  *
1610  * Currently there is only one implementation for this - x86 Page Attribute
1611  * Table (PAT). See Documentation/arch/x86/pat.rst for more details.
1612  *
1613  * This function can fail if the pfn range spans pfns that require differing
1614  * cachemodes. If the pfn range was previously verified to have a single
1615  * cachemode, it is sufficient to query only a single pfn. The assumption is
1616  * that this is the case for drivers using the vmf_insert_pfn*() interface.
1617  *
1618  * Returns 0 on success and -EINVAL on error.
1619  */
1620 int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
1621 		pgprot_t *prot);
1622 
1623 /**
1624  * pfnmap_track - track a pfn range
1625  * @pfn: the start of the pfn range
1626  * @size: the size of the pfn range in bytes
1627  * @prot: the pgprot to track
1628  *
1629  * Request the pfn range to be 'tracked' by a hardware implementation and
1630  * set up the cachemode in @prot similar to pfnmap_setup_cachemode().
1631  *
1632  * This allows for fine-grained control of memory cache behaviour at page
1633  * level granularity. Tracking memory this way is persisted across VMA splits
1634  * (VMA merging does not apply for VM_PFNMAP).
1635  *
1636  * Currently, there is only one implementation for this - x86 Page Attribute
1637  * Table (PAT). See Documentation/arch/x86/pat.rst for more details.
1638  *
1639  * Returns 0 on success and -EINVAL on error.
1640  */
1641 int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot);
1642 
1643 /**
1644  * pfnmap_untrack - untrack a pfn range
1645  * @pfn: the start of the pfn range
1646  * @size: the size of the pfn range in bytes
1647  *
1648  * Untrack a pfn range previously tracked through pfnmap_track().
1649  */
1650 void pfnmap_untrack(unsigned long pfn, unsigned long size);
1651 #endif
1652 
1653 /**
1654  * pfnmap_setup_cachemode_pfn - setup the cachemode in the pgprot for a pfn
1655  * @pfn: the pfn
1656  * @prot: the pgprot to modify
1657  *
1658  * Lookup the cachemode for @pfn and store it in @prot, leaving other
1659  * data in @prot unchanged.
1660  *
1661  * See pfnmap_setup_cachemode() for details.
1662  */
1663 static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
1664 {
1665 	pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot);
1666 }
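
/*
 * Illustrative sketch (hypothetical, not from this file): code establishing a
 * raw PFN mapping outside of the helpers that already do the tracking would
 * pair the calls as follows:
 *
 *	if (pfnmap_track(pfn, size, &prot))
 *		return -EINVAL;
 *	... establish the mapping using 'prot' ...
 *	pfnmap_untrack(pfn, size);	(on teardown)
 *
 * pfnmap_setup_cachemode_pfn() above is the single-page lookup variant for
 * ranges already known to have a single cachemode.
 */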
1667 
1668 #ifdef CONFIG_MMU
1669 #ifdef __HAVE_COLOR_ZERO_PAGE
1670 static inline int is_zero_pfn(unsigned long pfn)
1671 {
1672 	extern unsigned long zero_pfn;
1673 	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
1674 	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
1675 }
1676 
1677 #define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
1678 
1679 #else
1680 static inline int is_zero_pfn(unsigned long pfn)
1681 {
1682 	extern unsigned long zero_pfn;
1683 	return pfn == zero_pfn;
1684 }
1685 
1686 static inline unsigned long my_zero_pfn(unsigned long addr)
1687 {
1688 	extern unsigned long zero_pfn;
1689 	return zero_pfn;
1690 }
1691 #endif
1692 #else
1693 static inline int is_zero_pfn(unsigned long pfn)
1694 {
1695 	return 0;
1696 }
1697 
1698 static inline unsigned long my_zero_pfn(unsigned long addr)
1699 {
1700 	return 0;
1701 }
1702 #endif /* CONFIG_MMU */
1703 
1704 #ifdef CONFIG_MMU
1705 
1706 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
1707 static inline int pmd_trans_huge(pmd_t pmd)
1708 {
1709 	return 0;
1710 }
1711 #ifndef pmd_write
1712 static inline int pmd_write(pmd_t pmd)
1713 {
1714 	BUG();
1715 	return 0;
1716 }
1717 #endif /* pmd_write */
1718 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1719 
1720 #ifndef pud_write
1721 static inline int pud_write(pud_t pud)
1722 {
1723 	BUG();
1724 	return 0;
1725 }
1726 #endif /* pud_write */
1727 
1728 #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
1729 	!defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1730 static inline int pud_trans_huge(pud_t pud)
1731 {
1732 	return 0;
1733 }
1734 #endif
1735 
1736 static inline int pud_trans_unstable(pud_t *pud)
1737 {
1738 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
1739 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1740 	pud_t pudval = READ_ONCE(*pud);
1741 
1742 	if (pud_none(pudval) || pud_trans_huge(pudval))
1743 		return 1;
1744 	if (unlikely(pud_bad(pudval))) {
1745 		pud_clear_bad(pud);
1746 		return 1;
1747 	}
1748 #endif
1749 	return 0;
1750 }
1751 
1752 #ifndef CONFIG_NUMA_BALANCING
1753 /*
1754  * In an inaccessible (PROT_NONE) VMA, pte_protnone() may indicate "yes". It is
1755  * perfectly valid to indicate "no" in that case, which is why our default
1756  * implementation defaults to "always no".
1757  *
1758  * In an accessible VMA, however, pte_protnone() reliably indicates PROT_NONE
1759  * page protection due to NUMA hinting. NUMA hinting faults only apply in
1760  * accessible VMAs.
1761  *
1762  * So, to reliably identify PROT_NONE PTEs that require a NUMA hinting fault,
1763  * looking at the VMA accessibility is sufficient.
1764  */
1765 static inline int pte_protnone(pte_t pte)
1766 {
1767 	return 0;
1768 }
1769 
1770 static inline int pmd_protnone(pmd_t pmd)
1771 {
1772 	return 0;
1773 }
1774 #endif /* CONFIG_NUMA_BALANCING */
1775 
1776 #endif /* CONFIG_MMU */
1777 
1778 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
1779 
1780 #ifndef __PAGETABLE_P4D_FOLDED
1781 int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
1782 void p4d_clear_huge(p4d_t *p4d);
1783 #else
1784 static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
1785 {
1786 	return 0;
1787 }
1788 static inline void p4d_clear_huge(p4d_t *p4d) { }
1789 #endif /* !__PAGETABLE_P4D_FOLDED */
1790 
1791 int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
1792 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
1793 int pud_clear_huge(pud_t *pud);
1794 int pmd_clear_huge(pmd_t *pmd);
1795 int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
1796 int pud_free_pmd_page(pud_t *pud, unsigned long addr);
1797 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
1798 #else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
1799 static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
1800 {
1801 	return 0;
1802 }
1803 static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
1804 {
1805 	return 0;
1806 }
1807 static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
1808 {
1809 	return 0;
1810 }
1811 static inline void p4d_clear_huge(p4d_t *p4d) { }
1812 static inline int pud_clear_huge(pud_t *pud)
1813 {
1814 	return 0;
1815 }
1816 static inline int pmd_clear_huge(pmd_t *pmd)
1817 {
1818 	return 0;
1819 }
1820 static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
1821 {
1822 	return 0;
1823 }
1824 static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1825 {
1826 	return 0;
1827 }
1828 static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1829 {
1830 	return 0;
1831 }
1832 #endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
1833 
1834 #ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
1835 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1836 /*
1837  * Architectures with special requirements for evicting THP backing TLB
1838  * entries can implement this. It can also help optimize the normal TLB
1839  * flush in the THP case: the stock flush_tlb_range() typically nukes the
1840  * entire TLB once the flush span exceeds a threshold, which is likely
1841  * the case for a single huge page. A single THP flush would therefore
1842  * invalidate the entire TLB, which is not desirable.
1843  * e.g. see arch/arc: flush_pmd_tlb_range
1844  */
1845 #define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
1846 #define flush_pud_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
1847 #else
1848 #define flush_pmd_tlb_range(vma, addr, end)	BUILD_BUG()
1849 #define flush_pud_tlb_range(vma, addr, end)	BUILD_BUG()
1850 #endif
1851 #endif
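/*
 * Illustrative sketch (not part of this header): the typical call pattern
 * in generic THP code after modifying a PMD-level entry, assuming haddr is
 * the huge-page-aligned start of the mapping:
 *
 *	flush_pmd_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
 */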
1852 
1853 struct file;
1854 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
1855 			unsigned long size, pgprot_t *vma_prot);
1856 
1857 #ifndef CONFIG_X86_ESPFIX64
1858 static inline void init_espfix_bsp(void) { }
1859 #endif
1860 
1861 extern void __init pgtable_cache_init(void);
1862 
1863 #ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
1864 static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
1865 {
1866 	return true;
1867 }
1868 
1869 static inline bool arch_has_pfn_modify_check(void)
1870 {
1871 	return false;
1872 }
1873 #endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
1874 
1875 /*
1876  * Architecture PAGE_KERNEL_* fallbacks
1877  *
1878  * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
1879  * because they really don't support them, or the port needs to be updated to
1880  * reflect the required functionality. Below is a set of relatively safe,
1881  * best-effort fallbacks to rely on until those architectures define the
1882  * flags themselves.
1883  */
1884 
1885 #ifndef PAGE_KERNEL_RO
1886 # define PAGE_KERNEL_RO PAGE_KERNEL
1887 #endif
1888 
1889 #ifndef PAGE_KERNEL_EXEC
1890 # define PAGE_KERNEL_EXEC PAGE_KERNEL
1891 #endif
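/*
 * Illustrative sketch (not part of this header): a caller can ask for a
 * read-only kernel mapping with PAGE_KERNEL_RO and still build on
 * architectures where it falls back to PAGE_KERNEL (only the write
 * protection is lost).  pages and nr_pages are assumed to be supplied by
 * the caller:
 *
 *	void *addr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL_RO);
 */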
1892 
1893 /*
1894  * Page Table Modification bits for pgtbl_mod_mask.
1895  *
1896  * These are used by the p?d_alloc_track*() set of functions and in the generic
1897  * vmalloc/ioremap code to track at which page-table levels entries have been
1898  * modified. Based on that the code can better decide when vmalloc and ioremap
1899  * mapping changes need to be synchronized to other page-tables in the system.
1900  */
1901 #define		__PGTBL_PGD_MODIFIED	0
1902 #define		__PGTBL_P4D_MODIFIED	1
1903 #define		__PGTBL_PUD_MODIFIED	2
1904 #define		__PGTBL_PMD_MODIFIED	3
1905 #define		__PGTBL_PTE_MODIFIED	4
1906 
1907 #define		PGTBL_PGD_MODIFIED	BIT(__PGTBL_PGD_MODIFIED)
1908 #define		PGTBL_P4D_MODIFIED	BIT(__PGTBL_P4D_MODIFIED)
1909 #define		PGTBL_PUD_MODIFIED	BIT(__PGTBL_PUD_MODIFIED)
1910 #define		PGTBL_PMD_MODIFIED	BIT(__PGTBL_PMD_MODIFIED)
1911 #define		PGTBL_PTE_MODIFIED	BIT(__PGTBL_PTE_MODIFIED)
1912 
1913 /* Page-Table Modification Mask */
1914 typedef unsigned int pgtbl_mod_mask;
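/*
 * Illustrative sketch (not part of this header): a vmalloc-style mapping
 * routine accumulates a pgtbl_mod_mask while populating a range and only
 * synchronizes other page tables when a relevant level was modified.
 * ARCH_PAGE_TABLE_SYNC_MASK and arch_sync_kernel_mappings() are the hooks
 * used by the existing vmalloc code:
 *
 *	pgtbl_mod_mask mask = 0;
 *
 *	... populate the range, OR-ing PGTBL_*_MODIFIED bits into mask ...
 *
 *	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 *		arch_sync_kernel_mappings(start, end);
 */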
1915 
1916 #endif /* !__ASSEMBLY__ */
1917 
1918 #if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
1919 #ifdef CONFIG_PHYS_ADDR_T_64BIT
1920 /*
1921  * ZSMALLOC needs to know the highest PFN. 32-bit architectures with
1922  * physical address space extension must define MAX_POSSIBLE_PHYSMEM_BITS
1923  * themselves; plain 32-bit falls back to BITS_PER_LONG.
1924  */
1925 #error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
1926 #else
1927 #define MAX_POSSIBLE_PHYSMEM_BITS 32
1928 #endif
1929 #endif
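/*
 * For example, a 32-bit configuration with a 64-bit phys_addr_t (such as
 * x86 PAE, which can address up to 2^36 bytes of physical memory) is
 * expected to provide its own definition in its <asm/...> headers, e.g.:
 *
 *	#define MAX_POSSIBLE_PHYSMEM_BITS	36
 */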
1930 
1931 #ifndef has_transparent_hugepage
1932 #define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
1933 #endif
1934 
1935 #ifndef has_transparent_pud_hugepage
1936 #define has_transparent_pud_hugepage() IS_BUILTIN(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1937 #endif
1938 /*
1939  * On some architectures, whether the p4d/pud or pmd layer of the page
1940  * table hierarchy is folded depends on the mm.
1941  */
1942 #ifndef mm_p4d_folded
1943 #define mm_p4d_folded(mm)	__is_defined(__PAGETABLE_P4D_FOLDED)
1944 #endif
1945 
1946 #ifndef mm_pud_folded
1947 #define mm_pud_folded(mm)	__is_defined(__PAGETABLE_PUD_FOLDED)
1948 #endif
1949 
1950 #ifndef mm_pmd_folded
1951 #define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
1952 #endif
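/*
 * Illustrative sketch (not part of this header): generic code can use
 * these predicates to skip per-level work for an mm whose upper levels are
 * folded, e.g. when accounting page-table memory (account_pud_table() is a
 * hypothetical helper):
 *
 *	if (!mm_pud_folded(mm))
 *		account_pud_table(mm);
 */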
1953 
1954 #ifndef p4d_offset_lockless
1955 #define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
1956 #endif
1957 #ifndef pud_offset_lockless
1958 #define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
1959 #endif
1960 #ifndef pmd_offset_lockless
1961 #define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
1962 #endif
1963 
1964 /*
1965  * pXd_leaf() is the API to check whether a pgtable entry is a huge page
1966  * mapping.  It should work globally across all archs, without any
1967  * dependency on CONFIG_* options.  For architectures that do not support
1968  * huge mappings on specific levels, below fallbacks will be used.
1969  *
1970  * A leaf pgtable entry should always imply the following:
1971  *
1972  * - It is a "present" entry.  IOW, before using this API, please check it
1973  *   with pXd_present() first. NOTE: it may not always mean the "present
1974  *   bit" is set.  For example, PROT_NONE entries are always "present".
1975  *
1976  * - It should _never_ be a swap entry of any type.  Above "present" check
1977  *   should have guarded this, but let's be crystal clear on this.
1978  *
1979  * - It should contain a huge PFN, which points to a huge page larger than
1980  *   PAGE_SIZE of the platform.  The PFN format isn't important here.
1981  *
1982  * - It should cover all kinds of huge mappings (i.e. pXd_trans_huge()
1983  *   or hugetlb mappings).
1984  */
1985 #ifndef pgd_leaf
1986 #define pgd_leaf(x)	false
1987 #endif
1988 #ifndef p4d_leaf
1989 #define p4d_leaf(x)	false
1990 #endif
1991 #ifndef pud_leaf
1992 #define pud_leaf(x)	false
1993 #endif
1994 #ifndef pmd_leaf
1995 #define pmd_leaf(x)	false
1996 #endif
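/*
 * Illustrative sketch (not part of this header): a generic page-table
 * walker detecting a huge mapping at the PUD level, following the rules
 * above (check pud_present() before pud_leaf()); pudp comes from the
 * caller's walk:
 *
 *	pud_t pud = READ_ONCE(*pudp);
 *
 *	if (pud_present(pud) && pud_leaf(pud)) {
 *		... handle a mapping larger than PAGE_SIZE at this level ...
 *	}
 */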
1997 
1998 #ifndef pgd_leaf_size
1999 #define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT)
2000 #endif
2001 #ifndef p4d_leaf_size
2002 #define p4d_leaf_size(x) P4D_SIZE
2003 #endif
2004 #ifndef pud_leaf_size
2005 #define pud_leaf_size(x) PUD_SIZE
2006 #endif
2007 #ifndef pmd_leaf_size
2008 #define pmd_leaf_size(x) PMD_SIZE
2009 #endif
2010 #ifndef __pte_leaf_size
2011 #ifndef pte_leaf_size
2012 #define pte_leaf_size(x) PAGE_SIZE
2013 #endif
2014 #define __pte_leaf_size(x,y) pte_leaf_size(y)
2015 #endif
2016 
2017 /*
2018  * We always define pmd_pfn for all archs as it's used in lots of generic
2019  * code.  The same is now true for pud_pfn (and may become true for even
2020  * larger mappings in the future; we're not there yet).  Instead of
2021  * requiring every arch to define it (like pmd_pfn), provide a fallback.
2022  *
2023  * Note that returning 0 here means any arch that didn't define this will
2024  * go badly wrong when it hits a real pud leaf.  It is the arch's
2025  * responsibility to define it properly when a huge pud is possible.
2026  */
2027 #ifndef pud_pfn
2028 #define pud_pfn(x) 0
2029 #endif
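/*
 * Illustrative sketch (not part of this header): deriving the PFN for an
 * address inside a huge PUD mapping, assuming the architecture defines
 * pud_pfn() properly:
 *
 *	unsigned long pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
 */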
2030 
2031 /*
2032  * Some architectures have MMUs that are configurable or selectable at boot
2033  * time. These lead to variable PTRS_PER_x. For statically allocated arrays it
2034  * helps to have a static maximum value.
2035  */
2036 
2037 #ifndef MAX_PTRS_PER_PTE
2038 #define MAX_PTRS_PER_PTE PTRS_PER_PTE
2039 #endif
2040 
2041 #ifndef MAX_PTRS_PER_PMD
2042 #define MAX_PTRS_PER_PMD PTRS_PER_PMD
2043 #endif
2044 
2045 #ifndef MAX_PTRS_PER_PUD
2046 #define MAX_PTRS_PER_PUD PTRS_PER_PUD
2047 #endif
2048 
2049 #ifndef MAX_PTRS_PER_P4D
2050 #define MAX_PTRS_PER_P4D PTRS_PER_P4D
2051 #endif
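/*
 * Illustrative sketch (not part of this header): a statically allocated
 * early page table sized for the worst case, so it remains large enough no
 * matter which PTRS_PER_PMD value is selected at boot (early_shadow_pmd is
 * a hypothetical name):
 *
 *	static pmd_t early_shadow_pmd[MAX_PTRS_PER_PMD] __page_aligned_bss;
 */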
2052 
2053 #ifndef pte_pgprot
2054 #define pte_pgprot(x) ((pgprot_t) {0})
2055 #endif
2056 
2057 #ifndef pmd_pgprot
2058 #define pmd_pgprot(x) ((pgprot_t) {0})
2059 #endif
2060 
2061 #ifndef pud_pgprot
2062 #define pud_pgprot(x) ((pgprot_t) {0})
2063 #endif
2064 
2065 /* description of effects of mapping type and prot in current implementation.
2066  * this is due to the limited x86 page protection hardware.  The expected
2067  * behavior is in parens:
2068  *
2069  * map_type	prot
2070  *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
2071  * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
2072  *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
2073  *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
2074  *
2075  * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
2076  *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
2077  *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
2078  *
2079  * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
2080  * MAP_PRIVATE (with Enhanced PAN supported):
2081  *								r: (no) no
2082  *								w: (no) no
2083  *								x: (yes) yes
2084  */
2085 #define DECLARE_VM_GET_PAGE_PROT					\
2086 pgprot_t vm_get_page_prot(vm_flags_t vm_flags)				\
2087 {									\
2088 		return protection_map[vm_flags &			\
2089 			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];	\
2090 }									\
2091 EXPORT_SYMBOL(vm_get_page_prot);
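/*
 * Illustrative sketch (not part of this header): an architecture without
 * special vm_get_page_prot() requirements supplies its protection_map[]
 * and then emits the generic implementation.  The pgprot values below are
 * placeholders; see an existing port (e.g. arm64) for a full 16-entry
 * table:
 *
 *	static pgprot_t protection_map[16] __ro_after_init = {
 *		[VM_NONE]	= PAGE_NONE,
 *		[VM_READ]	= PAGE_READONLY,
 *		...
 *	};
 *	DECLARE_VM_GET_PAGE_PROT
 */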
2092 
2093 #endif /* _LINUX_PGTABLE_H */
2094