xref: /linux/mm/pagewalk.c (revision 7b86ac3371b70c3fd8fd95501719beb1faab719f)
// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;
	const struct mm_walk_ops *ops = walk->ops;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
		       break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || !walk->vma) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (ops->pmd_entry)
			err = ops->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if (!ops->pte_entry)
			continue;

		split_huge_pmd(walk->vma, pmd, addr);
		if (pmd_trans_unstable(pmd))
			goto again;
		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}

static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	pud = pud_offset(p4d, addr);
	do {
 again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || !walk->vma) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}

		if (ops->pud_entry) {
			spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);

			if (ptl) {
				err = ops->pud_entry(pud, addr, next, walk);
				spin_unlock(ptl);
				if (err)
					break;
				continue;
			}
		}

		split_huge_pud(walk->vma, pud, addr);
		if (pud_none(*pud))
			goto again;

		if (ops->pmd_entry || ops->pte_entry)
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (ops->pmd_entry || ops->pte_entry)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (ops->pmd_entry || ops->pte_entry)
			err = walk_p4d_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	unsigned long sz = huge_page_size(h);
	pte_t *pte;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask, sz);

		if (pte)
			err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
		else if (ops->pte_hole)
			err = ops->pte_hole(addr, next, walk);

		if (err)
			break;
	} while (addr = next, addr != end);

	return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative value means
 * an error, in which case we abort the current walk.  (An illustrative
 * ->test_walk() sketch follows this function.)
 */
static int walk_page_test(unsigned long start, unsigned long end,
			struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (ops->test_walk)
		return ops->test_walk(start, end, walk);

	/*
	 * A VM_PFNMAP vma doesn't have any valid struct pages behind its
	 * range, so we don't walk over it as we do for normal vmas. However,
	 * some callers are interested in handling holes and don't want any
	 * address range to be silently skipped. Such callers define a
	 * ->pte_hole() callback, so let's delegate the VM_PFNMAP range
	 * to it.
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;
		if (ops->pte_hole)
			err = ops->pte_hole(start, end, walk);
		return err ? err : 1;
	}
	return 0;
}
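
/*
 * Illustrative sketch only (not an in-tree user): a hypothetical
 * ->test_walk() callback following the convention documented above.
 * The name example_test_walk and the VM_IO policy are made up for
 * illustration; __maybe_unused keeps the sketch from warning while it
 * is not wired into any mm_walk_ops instance.
 */
static int __maybe_unused example_test_walk(unsigned long start,
					    unsigned long end,
					    struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;

	/* returning a negative errno here would abort the whole walk */
	if (vma->vm_flags & VM_IO)
		return 1;	/* skip this vma, continue with the next one */
	return 0;		/* walk this vma */
}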

static int __walk_page_range(unsigned long start, unsigned long end,
			struct mm_walk *walk)
{
	int err = 0;
	struct vm_area_struct *vma = walk->vma;

	if (vma && is_vm_hugetlb_page(vma)) {
		if (walk->ops->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	return err;
}

/**
 * walk_page_range - walk page table with caller specific callbacks
 * @mm:		mm_struct representing the target process of page table walk
 * @start:	start address of the virtual address range
 * @end:	end address of the virtual address range
 * @ops:	operations to call during the walk
 * @private:	private data for callbacks' usage
 *
 * Recursively walk the page table tree of the process represented by @mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If a callback is not set up, the
 * associated entries/pages are simply ignored.
 * The return values of these callbacks are commonly defined as follows:
 *
 *  - 0  : the current entry was handled successfully; if the end address has
 *         not been reached yet, continue to walk.
 *  - >0 : the current entry was handled successfully; return to the caller
 *         with this caller specific value.
 *  - <0 : handling the current entry failed; return to the caller with this
 *         error code.
 *
 * Before starting to walk the page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
 * purpose.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for the access from callbacks. If you want to pass some
 * caller-specific data to the callbacks, @private should be helpful.
 * (An illustrative usage sketch follows the function body below.)
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_sem,
 *   because these functions traverse the vma list and/or access vma data.
 */
int walk_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private)
{
	int err = 0;
	unsigned long next;
	struct vm_area_struct *vma;
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.private	= private,
	};

	if (start >= end)
		return -EINVAL;

	if (!walk.mm)
		return -EINVAL;

	VM_BUG_ON_MM(!rwsem_is_locked(&walk.mm->mmap_sem), walk.mm);

	vma = find_vma(walk.mm, start);
	do {
		if (!vma) { /* after the last vma */
			walk.vma = NULL;
			next = end;
		} else if (start < vma->vm_start) { /* outside vma */
			walk.vma = NULL;
			next = min(end, vma->vm_start);
		} else { /* inside vma */
			walk.vma = vma;
			next = min(end, vma->vm_end);
			vma = vma->vm_next;

			err = walk_page_test(start, next, &walk);
			if (err > 0) {
				/*
				 * positive return values are purely for
				 * controlling the pagewalk, so should never
				 * be passed to the callers.
				 */
				err = 0;
				continue;
			}
			if (err < 0)
				break;
		}
		if (walk.vma || walk.ops->pte_hole)
			err = __walk_page_range(start, next, &walk);
		if (err)
			break;
	} while (start = next, start < end);
	return err;
}
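
/*
 * Illustrative sketch only (not an in-tree user): a minimal example of how a
 * caller might drive walk_page_range() with a ->pte_entry() callback that
 * counts present ptes.  count_pte_entry, count_walk_ops and the counting
 * policy are hypothetical names invented for this sketch; __maybe_unused
 * keeps the sketch from warning while it has no caller.
 */
static int __maybe_unused count_pte_entry(pte_t *pte, unsigned long addr,
					  unsigned long next,
					  struct mm_walk *walk)
{
	/* walk->private carries the cookie passed to walk_page_range() */
	unsigned long *nr_present = walk->private;

	if (pte_present(*pte))
		(*nr_present)++;
	return 0;	/* keep walking */
}

static const struct mm_walk_ops count_walk_ops __maybe_unused = {
	.pte_entry	= count_pte_entry,
};

/*
 * A caller would then do something like the following, holding mmap_sem as
 * required by the locking rule above:
 *
 *	unsigned long nr_present = 0;
 *
 *	down_read(&mm->mmap_sem);
 *	err = walk_page_range(mm, start, end, &count_walk_ops, &nr_present);
 *	up_read(&mm->mmap_sem);
 */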

int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
		void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= vma->vm_mm,
		.vma		= vma,
		.private	= private,
	};
	int err;

	if (!walk.mm)
		return -EINVAL;

	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));

	err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
	if (err > 0)
		return 0;
	if (err < 0)
		return err;
	return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}