/* linux/mm/pagewalk.c (revision 2165009bdf63f79716a36ad545df14c3cdf958b7) */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}
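
/*
 * Illustrative sketch (not part of the original file): a callback of the
 * shape walk_pte_range() invokes for each PTE.  The name and the
 * "count present pages" policy are hypothetical; walk->private is assumed
 * to point at an unsigned long counter supplied by the caller.
 */
static int example_pte_entry(pte_t *pte, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	unsigned long *nr_present = walk->private;

	if (pte_present(*pte))
		(*nr_present)++;
	return 0;	/* any non-zero return value aborts the walk */
}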

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (!err && walk->pte_entry)
			err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
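
/*
 * Illustrative sketch (not part of the original file): a pte_hole()
 * callback of the form the walkers here invoke for unmapped ranges
 * (the pmd_none_or_clear_bad()/pud_none_or_clear_bad() cases).  The name
 * and the byte-counting policy are hypothetical, and walk->private is
 * assumed to point at an unsigned long byte counter.
 */
static int example_pte_hole(unsigned long addr, unsigned long end,
			    struct mm_walk *walk)
{
	unsigned long *hole_bytes = walk->private;

	*hole_bytes += end - addr;
	return 0;
}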

static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pud_entry)
			err = walk->pud_entry(pud, addr, next, walk);
		if (!err && (walk->pmd_entry || walk->pte_entry))
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

/**
 * walk_page_range - walk a memory map's page tables with callbacks
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree, plus the
 *        mm_struct (walk->mm) whose page tables are walked
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer, the start and end of the
 * associated range, and the mm_walk itself, giving access to its
 * ->private and ->mm fields. An illustrative usage sketch follows the
 * function body below.
 *
 * No locks are taken, but the bottom-level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * that value is propagated back to the caller. Otherwise 0 is returned.
 */
int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;

	if (addr >= end)
		return err;

	if (!walk->mm)
		return -EINVAL;

	pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pgd_entry)
			err = walk->pgd_entry(pgd, addr, next, walk);
		if (!err &&
		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
			err = walk_pud_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}
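
/*
 * Illustrative sketch (not part of the original file): wiring the
 * hypothetical example_pte_entry() above into a walk.  Only fields this
 * file actually reads (mm, private and the callback pointers) are set;
 * callbacks left NULL, such as pte_hole here, are simply skipped.  Any
 * locking the callbacks rely on (typically mm->mmap_sem) is the caller's
 * responsibility, since walk_page_range() itself takes no locks.
 */
static unsigned long example_count_present(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	unsigned long nr_present = 0;
	struct mm_walk walk = {
		.pte_entry	= example_pte_entry,
		.mm		= mm,
		.private	= &nr_present,
	};

	walk_page_range(start, end, &walk);
	return nr_present;
}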