// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in linux/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mm_inline.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting the entry to p?d_none.  Usually (but
 * very seldom) called from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stubbed out like the p4d/pud ones
 * above: pmd folding is special, and the pmd_* macros typically refer to the
 * upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

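/*
 * A minimal sketch of how the p?d_none_or_clear_bad() checks are meant to be
 * used by a page table walker (the example_ helper below is hypothetical and
 * only illustrative, modelled on the walkers in mm/memory.c): a bad entry is
 * reported and cleared by the functions above, and the walker then skips the
 * range as if it had been empty.
 */
static int example_count_mapped_puds(p4d_t *p4d, unsigned long addr,
				     unsigned long end)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;
	int mapped = 0;

	do {
		next = pud_addr_end(addr, end);
		/* Empty, or just reported and cleared as bad: skip it */
		if (pud_none_or_clear_bad(pud))
			continue;
		mapped++;
	} while (pud++, addr = next, addr != end);

	return mapped;
}
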
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache().  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(ptep_get(ptep), entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address, ptep);
	}
	return changed;
}
#endif

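/*
 * A minimal sketch of the caller pattern described above (the example_ helper
 * is hypothetical; the real callers are the fault handlers in mm/memory.c,
 * and the caller is assumed to hold the pte lock): build a "more permissive"
 * pte, and only do the update_mmu_cache() work when ptep_set_access_flags()
 * reports that the pte actually changed.
 */
static void example_mark_pte_accessed(struct vm_area_struct *vma,
				      unsigned long address, pte_t *ptep,
				      int write)
{
	pte_t entry = pte_mkyoung(ptep_get(ptep));

	if (write)
		entry = pte_mkdirty(entry);
	if (ptep_set_access_flags(vma, address, ptep, entry, write))
		update_mmu_cache(vma, address, ptep);
}
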
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
			   !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring of some arches */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif

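/*
 * A minimal sketch of how the deposit/withdraw pair above is used (the
 * example_ helper is hypothetical; the real users are the THP fault, collapse
 * and split paths): a preallocated page table is deposited when a huge pmd is
 * installed, and withdrawn again, under the same pmd lock, when that huge pmd
 * is later split or zapped.
 */
static void example_install_huge_pmd(struct mm_struct *mm, unsigned long haddr,
				     pmd_t *pmd, pmd_t entry, pgtable_t pgtable)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);

	/* Stash the page table so a later split can reuse it */
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
	spin_unlock(ptl);
}
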
#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
			 pmd_t *pmdp)
{
	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	return pmdp_invalidate(vma, address, pmdp);
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and hugepage pte formats are the same, so we can
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes, not the pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif

/* an arch may define pte_free_defer in asm/pgalloc.h to override this */
#ifndef pte_free_defer
static void pte_free_now(struct rcu_head *head)
{
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	pte_free(NULL /* mm not passed and not used */, (pgtable_t)page);
}

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct page *page;

	page = pgtable;
	call_rcu(&page->rcu_head, pte_free_now);
}
#endif /* pte_free_defer */
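
/*
 * A minimal sketch of the deferred-free pattern (the example_ helper is
 * hypothetical; the real user is the khugepaged collapse path, and haddr is
 * assumed to be the pmd-aligned start of the range): detach an empty,
 * pte-mapped page table from its pmd under the pmd lock, then free it through
 * pte_free_defer(), so that lockless walkers which already reached it via
 * __pte_offset_map()'s rcu_read_lock() keep seeing a valid (but empty,
 * disconnected) table until a grace period has elapsed.
 */
static void example_retire_page_table(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      pmd_t *pmd, unsigned long haddr)
{
	spinlock_t *pml = pmd_lock(mm, pmd);
	pmd_t pmdval = pmdp_collapse_flush(vma, haddr, pmd);

	spin_unlock(pml);
	mm_dec_nr_ptes(mm);
	pte_free_defer(mm, pmd_pgtable(pmdval));
}
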
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#if defined(CONFIG_GUP_GET_PXX_LOW_HIGH) && \
	(defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RCU))
/*
 * See the comment above ptep_get_lockless() in include/linux/pgtable.h:
 * the barriers in pmdp_get_lockless() cannot guarantee that the value in
 * pmd_high actually belongs with the value in pmd_low; but holding interrupts
 * off blocks the TLB flush between present updates, which guarantees that a
 * successful __pte_offset_map() points to a page from matched halves.
 */
static unsigned long pmdp_get_lockless_start(void)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	return irqflags;
}
static void pmdp_get_lockless_end(unsigned long irqflags)
{
	local_irq_restore(irqflags);
}
#else
static unsigned long pmdp_get_lockless_start(void) { return 0; }
static void pmdp_get_lockless_end(unsigned long irqflags) { }
#endif

pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
{
	unsigned long irqflags;
	pmd_t pmdval;

	rcu_read_lock();
	irqflags = pmdp_get_lockless_start();
	pmdval = pmdp_get_lockless(pmd);
	pmdp_get_lockless_end(irqflags);

	if (pmdvalp)
		*pmdvalp = pmdval;
	if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval)))
		goto nomap;
	if (unlikely(pmd_trans_huge(pmdval) || pmd_devmap(pmdval)))
		goto nomap;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		goto nomap;
	}
	return __pte_map(&pmdval, addr);
nomap:
	rcu_read_unlock();
	return NULL;
}

pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, spinlock_t **ptlp)
{
	pmd_t pmdval;
	pte_t *pte;

	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (likely(pte))
		*ptlp = pte_lockptr(mm, &pmdval);
	return pte;
}

/*
 * pte_offset_map_lock(mm, pmd, addr, ptlp), and its internal implementation
 * __pte_offset_map_lock() below, is usually called with the pmd pointer for
 * addr, reached by walking down the mm's pgd, p4d, pud for addr: either while
 * holding mmap_lock or vma lock for read or for write; or in truncate or rmap
 * context, while holding file's i_mmap_lock or anon_vma lock for read (or for
 * write). In a few cases, it may be used with pmd pointing to a pmd_t already
 * copied to or constructed on the stack.
 *
 * When successful, it returns the pte pointer for addr, with its page table
 * kmapped if necessary (when CONFIG_HIGHPTE), and locked against concurrent
 * modification by software, with a pointer to that spinlock in ptlp (in some
 * configs mm->page_table_lock, in SPLIT_PTLOCK configs a spinlock in table's
 * struct page).  Use pte_unmap_unlock(pte, ptl) to unlock and unmap afterwards.
 *
 * But it is unsuccessful, returning NULL with *ptlp unchanged, if there is no
 * page table at *pmd: if, for example, the page table has just been removed,
 * or replaced by the huge pmd of a THP.  (When successful, *pmd is rechecked
 * after acquiring the ptlock, and retried internally if it changed: so that a
 * page table can be safely removed or replaced by THP while holding its lock.)
 *
 * pte_offset_map(pmd, addr), and its internal helper __pte_offset_map() above,
 * just returns the pte pointer for addr, its page table kmapped if necessary;
 * or NULL if there is no page table at *pmd.  It does not attempt to lock the
 * page table, so cannot normally be used when the page table is to be updated,
 * or when entries read must be stable.  But it does take rcu_read_lock(): so
 * that even when the page table is racily removed, it remains a valid though
 * empty and disconnected table, until pte_unmap(pte) unmaps it and
 * rcu_read_unlock()s afterwards.
 *
 * pte_offset_map_nolock(mm, pmd, addr, ptlp), above, is like pte_offset_map();
 * but when successful, it also outputs a pointer to the spinlock in ptlp - as
 * pte_offset_map_lock() does, but in this case without locking it.  This helps
 * the caller to avoid a later pte_lockptr(mm, *pmd), which might by that time
 * act on a changed *pmd: pte_offset_map_nolock() provides the correct spinlock
 * pointer for the page table that it returns.  In principle, the caller should
 * recheck *pmd once the lock is taken; in practice, no callsite needs that -
 * either the mmap_lock for write, or a pte_same() check on contents, is enough.
 *
 * Note that free_pgtables(), used after unmapping detached vmas, or when
 * exiting the whole mm, does not take page table lock before freeing a page
 * table, and may not use RCU at all: "outsiders" like khugepaged should avoid
 * pte_offset_map() and co once the vma is detached from mm or mm_users is zero.
 */
pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, spinlock_t **ptlp)
{
	spinlock_t *ptl;
	pmd_t pmdval;
	pte_t *pte;
again:
	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (unlikely(!pte))
		return pte;
	ptl = pte_lockptr(mm, &pmdval);
	spin_lock(ptl);
	if (likely(pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	goto again;
}
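
/*
 * A minimal usage sketch of the lock-and-map API documented above (the
 * example_ helper is hypothetical; haddr is assumed to be the pmd-aligned
 * start of the range, and the caller to hold mmap_lock or the vma lock):
 * count the present ptes in one page table.  The NULL return must always be
 * handled, since the page table may have been removed, or replaced by a huge
 * pmd, before the lock was taken.
 */
static unsigned long example_count_present_ptes(struct mm_struct *mm,
						pmd_t *pmd, unsigned long haddr)
{
	unsigned long present = 0;
	spinlock_t *ptl;
	pte_t *start_pte, *pte;
	int i;

	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
	if (!start_pte)
		return 0;	/* no page table here (or a THP raced in) */
	for (i = 0, pte = start_pte; i < PTRS_PER_PTE; i++, pte++) {
		if (pte_present(ptep_get(pte)))
			present++;
	}
	pte_unmap_unlock(start_pte, ptl);
	return present;
}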
383