/* linux/mm/pagewalk.c */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

/*
 * Walk the PTE page mapped by @pmd, calling ->pte_entry once for each
 * page-sized slot in [addr, end).  Callers only invoke this when
 * walk->pte_entry is non-NULL.
 */
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}
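
/*
 * Illustrative sketch, not part of the original file: a minimal
 * ->pte_entry callback with the signature walk_pte_range() invokes
 * above.  The function name and the use of ->private as a counter are
 * assumptions for the example only, hence the #if 0 guard.
 */
#if 0	/* example only, not compiled */
static int count_present_pte(pte_t *pte, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	unsigned long *nr_present = walk->private;

	/* Returning non-zero here would abort the whole walk. */
	if (pte_present(*pte))
		(*nr_present)++;
	return 0;
}
#endif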

/*
 * Walk the pmd entries covering [addr, end) under @pud.  Holes are
 * reported through ->pte_hole; populated entries go to ->pmd_entry
 * and, if a ->pte_entry callback is supplied, on down to the pte level.
 */
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (!err && walk->pte_entry)
			err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}

/*
 * Walk the pud entries covering [addr, end) under @pgd, descending to
 * the pmd level when a ->pmd_entry or ->pte_entry callback is present.
 */
static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pud_entry)
			err = walk->pud_entry(pud, addr, next, walk);
		if (!err && (walk->pmd_entry || walk->pte_entry))
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree;
 *        walk->mm names the memory map to walk
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer and the start and end of the
 * associated range, and a copy of the original mm_walk for access to
 * the ->private or ->mm fields.
 *
 * No locks are taken by the walker itself, but the bottom level
 * iterator will map PTE directories from highmem if necessary.  The
 * caller is expected to hold walk->mm's mmap_sem, since vmas are
 * looked up during the walk.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is
 * returned.  (An illustrative usage sketch follows the function body.)
 */
int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	struct vm_area_struct *vma;

	if (addr >= end)
		return err;

	if (!walk->mm)
		return -EINVAL;

	pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);

		/*
		 * Handle hugetlb vmas individually, because the page table
		 * walk for a hugetlb page depends on the architecture and
		 * cannot be done in the same manner as for non-huge pages.
		 */
		vma = find_vma(walk->mm, addr);
#ifdef CONFIG_HUGETLB_PAGE
		if (vma && is_vm_hugetlb_page(vma)) {
			pte_t *pte;
			struct hstate *hs;

			if (vma->vm_end < next)
				next = vma->vm_end;
			hs = hstate_vma(vma);
			pte = huge_pte_offset(walk->mm,
					      addr & huge_page_mask(hs));
			if (pte && !huge_pte_none(huge_ptep_get(pte))
			    && walk->hugetlb_entry)
				err = walk->hugetlb_entry(pte, addr,
							  next, walk);
			if (err)
				break;
			continue;
		}
#endif
		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			pgd++;
			continue;
		}
		if (walk->pgd_entry)
			err = walk->pgd_entry(pgd, addr, next, walk);
		if (!err &&
		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
			err = walk_pud_range(pgd, addr, next, walk);
		if (err)
			break;
		pgd++;
	} while (addr = next, addr != end);

	return err;
}
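
/*
 * Illustrative usage sketch, not part of the original file: counting
 * the pages mapped in a range of an mm via the count_present_pte()
 * sketch above.  The function and variable names are assumptions for
 * the example only.  The caller takes mmap_sem for reading because
 * walk_page_range() looks up vmas via find_vma().
 */
#if 0	/* example only, not compiled */
static unsigned long count_present_range(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
{
	unsigned long nr_present = 0;
	struct mm_walk count_walk = {
		.pte_entry	= count_present_pte,	/* sketch above */
		.mm		= mm,
		.private	= &nr_present,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &count_walk);
	up_read(&mm->mmap_sem);

	return nr_present;
}
#endif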