xref: /linux/mm/page_vma_mapped.c (revision fe1136b4ccbfac9b8e72d4551d1ce788a67d59cb)
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

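/* End the walk and report that no (further) mapping was found. */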
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

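/*
 * map_pte - map and, where worthwhile, lock the PTE for @pvmw->address.
 *
 * With PVMW_SYNC, the PTE is mapped and locked unconditionally.  Otherwise
 * the PTE is mapped locklessly first, and the PTL is only taken once the
 * entry looks relevant to the walk (present, migration, or device
 * private/exclusive).  On a true return, @pvmw->ptl is held.  On a false
 * return with @pvmw->pte still set, the entry was filtered out but remains
 * mapped, so the caller may keep scanning the following PTEs;
 * @pvmw->pte == NULL means the page table itself could not be mapped.
 */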
static bool map_pte(struct page_vma_mapped_walk *pvmw, pmd_t *pmdvalp,
		    spinlock_t **ptlp)
{
	pte_t ptent;

	if (pvmw->flags & PVMW_SYNC) {
		/* Use the stricter lookup */
		pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
						pvmw->address, &pvmw->ptl);
		*ptlp = pvmw->ptl;
		return !!pvmw->pte;
	}

again:
	/*
	 * It is important to return the ptl corresponding to pte,
	 * in case *pvmw->pmd changes underneath us; so we need to
	 * return it even when choosing not to lock, in case caller
	 * proceeds to loop over next ptes, and finds a match later.
	 * Though, in most cases, page lock already protects this.
	 */
	pvmw->pte = pte_offset_map_rw_nolock(pvmw->vma->vm_mm, pvmw->pmd,
					     pvmw->address, pmdvalp, ptlp);
	if (!pvmw->pte)
		return false;

	ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		if (!is_swap_pte(ptent))
			return false;
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;

		/*
		 * Handle un-addressable ZONE_DEVICE memory.
		 *
		 * We get here when we are trying to unmap a private
		 * device page from the process address space. Such a
		 * page is not CPU accessible and thus is mapped as
		 * a special swap entry; nonetheless it still does
		 * count as a valid regular mapping for the page
		 * (and is accounted as such in the page maps count).
		 *
		 * So handle this special case as if it were a normal
		 * page mapping, i.e. lock the CPU page table and return true.
		 *
		 * For more details on device private memory see HMM
		 * (include/linux/hmm.h or mm/hmm.c).
		 */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;
	} else if (!pte_present(ptent)) {
		return false;
	}
	spin_lock(*ptlp);
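	/*
	 * The page table may have been freed or replaced under us (e.g.
	 * by THP collapse) between the lockless lookup and taking the
	 * lock: retry the lookup if so.
	 */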
	if (unlikely(!pmd_same(*pmdvalp, pmdp_get_lockless(pvmw->pmd)))) {
		pte_unmap_unlock(pvmw->pte, *ptlp);
		goto again;
	}
	pvmw->ptl = *ptlp;

	return true;
}

/**
 * check_pte - check if [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is
 * mapped at @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes the pte and the pfn range
 * to check
 *
 * page_vma_mapped_walk() found a place where the pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to
 * an arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages).
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages).
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;
	pte_t ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(ptent))
			return false;
		entry = pte_to_swp_entry(ptent);

		if (!is_migration_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else {
		if (!pte_present(ptent))
			return false;

		pfn = pte_pfn(ptent);
	}

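	/*
	 * Unsigned arithmetic: if pfn is below pvmw->pfn, the subtraction
	 * wraps to a huge value, so this one comparison checks
	 * pvmw->pfn <= pfn < pvmw->pfn + pvmw->nr_pages.
	 */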
	return (pfn - pvmw->pfn) < pvmw->nr_pages;
}

/*
 * Returns true if [pfn, pfn + HPAGE_PMD_NR) overlaps
 * [pvmw->pfn, pvmw->pfn + pvmw->nr_pages).  Careful not to overflow.
 */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
		return false;
	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
		return false;
	return true;
}

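/*
 * Advance pvmw->address to the start of the next @size-aligned region,
 * saturating at ULONG_MAX on overflow so that callers' "address < end"
 * loops terminate.
 */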
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, vma, address
 * and flags must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
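 *
 * Example (an illustrative sketch of a caller, with assumed local variables
 * folio, vma and address; rmap code typically builds the walk state with
 * the DEFINE_FOLIO_VMA_WALK() helper instead):
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.pfn = folio_pfn(folio),
 *		.nr_pages = folio_nr_pages(folio),
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (!pvmw.pte)
 *			continue;	// PMD-mapped THP; pvmw.pmd is valid
 *		// pvmw.pte maps one page of the folio; pvmw.ptl is held
 *	}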
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long end;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(is_vm_hugetlb_page(vma))) {
		struct hstate *hstate = hstate_vma(vma);
		unsigned long size = huge_page_size(hstate);
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);
		/*
		 * All callers that get here will already hold the
		 * i_mmap_rwsem.  Therefore, no additional locks need to be
		 * taken before calling hugetlb_walk().
		 */
		pvmw->pte = hugetlb_walk(vma, pvmw->address, size);
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	end = vma_address_end(pvmw);
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = pmdp_get_lockless(pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
		    (pmd_present(pmde) && pmd_devmap(pmde))) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    !check_pmd(swp_offset_pfn(entry), pvmw))
					return not_found(pvmw);
				return true;
			}
			if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (!check_pmd(pmd_pfn(pmde), pvmw))
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    thp_vma_suitable_order(vma, pvmw->address,
						   PMD_ORDER) &&
			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
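		/*
		 * map_pte() returning false with pvmw->pte == NULL means the
		 * page table is gone: rescan from the pmd level.  With
		 * pvmw->pte still set, this pte was merely not interesting:
		 * keep scanning from the next one.
		 */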
		if (!map_pte(pvmw, &pmde, &ptl)) {
			if (!pvmw->pte)
				goto restart;
			goto next_pte;
		}
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/*
			 * Did we cross a page table boundary?  The address is
			 * PMD-aligned exactly when all of the PTE-index bits
			 * are zero again.
			 */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
		} while (pte_none(ptep_get(pvmw->pte)));

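		/*
		 * On the !PVMW_SYNC path map_pte() may have declined to take
		 * the PTL: take it now that a non-none pte was found, and
		 * restart if the page table was freed or replaced underneath
		 * us in the meantime.
		 */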
		if (!pvmw->ptl) {
			spin_lock(ptl);
			if (unlikely(!pmd_same(pmde, pmdp_get_lockless(pvmw->pmd)))) {
				pte_unmap_unlock(pvmw->pte, ptl);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->ptl = ptl;
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}

#ifdef CONFIG_MEMORY_FAILURE
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Return: The address the page is mapped at if the page is in the range
 * covered by the VMA and present in the page table.  If the page is
 * outside the VMA or not present, returns -EFAULT.
 * Only valid for normal file or anonymous VMAs.
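 *
 * Example (an illustrative sketch only; the real caller lives in
 * mm/memory-failure.c):
 *
 *	unsigned long addr = page_mapped_in_vma(page, vma);
 *
 *	if (addr != -EFAULT)
 *		pr_info("page mapped at %#lx\n", addr);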
 */
unsigned long page_mapped_in_vma(const struct page *page,
		struct vm_area_struct *vma)
{
	const struct folio *folio = page_folio(page);
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(vma, page_pgoff(folio, page), 1);
	if (pvmw.address == -EFAULT)
		goto out;
	if (!page_vma_mapped_walk(&pvmw))
		return -EFAULT;
	page_vma_mapped_walk_done(&pvmw);
out:
	return pvmw.address;
}
#endif