xref: /linux/mm/pgtable-generic.c (revision 86ec2da037b85436b63afe3df43ed48fa0e52b0e)
// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stubbed out just as for
 * p4d/pud above: pmd folding is special, and typically the pmd_*
 * macros refer to the upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}
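
/*
 * For illustration (a hedged sketch of the caller, not part of this
 * file): the p?d_none_or_clear_bad() helpers in asm-generic/pgtable.h
 * invoke the functions above roughly like this:
 *
 *	static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 *	{
 *		if (pmd_none(*pmd))
 *			return 1;
 *		if (unlikely(pmd_bad(*pmd))) {
 *			pmd_clear_bad(pmd);
 *			return 1;
 *		}
 *		return 0;
 *	}
 */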

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache.  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif
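
/*
 * Typical usage (a sketch of the fault-handler pattern, as in
 * mm/memory.c; not part of this file): the return value gates the
 * architecture's MMU-cache update:
 *
 *	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
 *		update_mmu_cache(vma, address, ptep);
 */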

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
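	/*
	 * Flush only if the cleared PTE could have been cached by the
	 * hardware: pte_accessible() filters out entries for which no
	 * stale TLB entry can exist.
	 */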
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif
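
/*
 * An observation about the generic variants above: unlike
 * ptep_set_access_flags(), which relies on
 * flush_tlb_fix_spurious_fault(), the pmd variant always flushes the
 * whole huge-page range, as no spurious-fault fixup hook exists at
 * pmd granularity in the generic code.
 */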

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/*
	 * A present pmd must be trans-huge or devmap here; non-present
	 * entries (e.g. THP migration entries) are also allowed through.
	 */
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		  !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring of some archs */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif
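
/*
 * Illustrative pairing (a hedged sketch, not a caller in this file): a
 * page table deposited when a huge pmd is installed comes back out when
 * that pmd is zapped or split, all under the pmd lock:
 *
 *	spinlock_t *ptl = pmd_lock(mm, pmd);
 *	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 *	spin_unlock(ptl);
 *	pte_free(mm, pgtable);
 */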

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif
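
/*
 * An explanatory note on the generic pmdp_invalidate() above (assuming
 * the usual THP-split usage): pmd_mkinvalid() keeps the entry non-none,
 * so concurrent page-table walkers do not mistake the pmd for a pointer
 * to a pte page while the hardware can no longer use the mapping;
 * pmdp_establish() installs the invalid value and returns the old pmd
 * in one step.
 */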

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and hugepage pte formats are the same, so we can
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes, not the pmd itself */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */