/* linux/mm/pagewalk.c (revision 116354d177ba2da37e91cf884e3d11e67f825efd) */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

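/*
 * Walk the PTEs mapped by one pmd entry, invoking ->pte_entry for every
 * page-sized slot in [addr, end).  Stops early if the callback returns
 * a non-zero value.
 */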
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}

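/*
 * Walk the pmd entries covering [addr, end) under one pud entry.  Holes
 * (empty or bad pmds) are reported through ->pte_hole; populated entries
 * go to ->pmd_entry and, if a ->pte_entry callback exists, down to
 * walk_pte_range().
 */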
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (!err && walk->pte_entry)
			err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}

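/*
 * Walk the pud entries covering [addr, end) under one pgd entry,
 * reporting holes through ->pte_hole and descending into
 * walk_pmd_range() when a lower-level callback is present.
 */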
static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pud_entry)
			err = walk->pud_entry(pud, addr, next, walk);
		if (!err && (walk->pmd_entry || walk->pte_entry))
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
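/* Return the end of the hugepage containing @addr, clamped to @end. */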
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

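/*
 * Walk a hugetlb vma one huge page at a time, passing each huge pte
 * (looked up via huge_pte_offset()) to ->hugetlb_entry together with
 * the mask for the hugepage size.
 */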
static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	pte_t *pte;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask);
		if (pte && walk->hugetlb_entry)
			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
		if (err)
			return err;
	} while (addr = next, addr != end);

	return 0;
}
#endif

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree
 *
 * Recursively walk the page table of walk->mm for the memory area
 * between @addr and @end, calling the supplied callbacks. Callbacks
 * are called in order (first PGD, first PUD, first PMD, first PTE,
 * second PTE... second PMD, etc.). If lower-level callbacks are
 * omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer and the start and end of the
 * associated range, and a copy of the original mm_walk for access to
 * the ->private or ->mm fields.  walk->mm must be set; -EINVAL is
 * returned if it is not.
 *
 * No locks are taken by the walker itself, but callers are expected to
 * hold walk->mm's mmap_sem across the walk; the bottom-level iterator
 * will map PTE directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is
 * returned.
 */
int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	struct vm_area_struct *vma;

	if (addr >= end)
		return err;

	if (!walk->mm)
		return -EINVAL;

	pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);

		/*
		 * Handle hugetlb vmas individually because page table walking
		 * for hugetlb pages is architecture dependent and cannot be
		 * handled in the same manner as non-huge pages.
		 */
		vma = find_vma(walk->mm, addr);
#ifdef CONFIG_HUGETLB_PAGE
		if (vma && is_vm_hugetlb_page(vma)) {
			if (vma->vm_end < next)
				next = vma->vm_end;
			/*
			 * Hugepages are tightly coupled with their vma, so
			 * walk the hugetlb entries within this vma here.
			 */
			err = walk_hugetlb_range(vma, addr, next, walk);
			if (err)
				break;
			pgd = pgd_offset(walk->mm, next);
			continue;
		}
#endif
		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			pgd++;
			continue;
		}
		if (walk->pgd_entry)
			err = walk->pgd_entry(pgd, addr, next, walk);
		if (!err &&
		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
			err = walk_pud_range(pgd, addr, next, walk);
		if (err)
			break;
		pgd++;
	} while (addr = next, addr != end);

	return err;
}
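
/*
 * Example (not part of this file): a minimal sketch of how a caller might
 * use walk_page_range() to count the present pages in a range by supplying
 * only a ->pte_entry callback.  The function and variable names below
 * (count_present_pte, count_present_pages, count_walk) are hypothetical
 * and for illustration only.  The sketch assumes the caller takes
 * mm->mmap_sem for read around the walk, since walk_page_range() calls
 * find_vma().
 */
#if 0
static int count_present_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;		/* returning non-zero would abort the walk */
}

static unsigned long count_present_pages(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk count_walk = {
		.pte_entry	= count_present_pte,
		.mm		= mm,
		.private	= &count,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &count_walk);
	up_read(&mm->mmap_sem);

	return count;
}
#endif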