// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/bitops.h>
#include <linux/mmu_notifier.h>
#include <linux/mm_inline.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/**
 * struct wp_walk - Private struct for pagetable walk callbacks
 * @range: Range for mmu notifiers
 * @tlbflush_start: Address of first modified pte
 * @tlbflush_end: Address of last modified pte + 1
 * @total: Total number of modified ptes
 */
struct wp_walk {
	struct mmu_notifier_range range;
	unsigned long tlbflush_start;
	unsigned long tlbflush_end;
	unsigned long total;
};

/**
 * wp_pte - Write-protect a pte
 * @pte: Pointer to the pte
 * @addr: The start of the virtual address range to write-protect
 * @end: The end of the virtual address range to write-protect
 * @walk: pagetable walk callback argument
 *
 * The function write-protects a pte and records the range in
 * virtual address space of touched ptes for efficient range TLB flushes.
 */
static int wp_pte(pte_t *pte, unsigned long addr, unsigned long end,
		  struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;
	pte_t ptent = ptep_get(pte);

	if (pte_write(ptent)) {
		pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

		ptent = pte_wrprotect(old_pte);
		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
		wpwalk->total++;
		wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
		wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
					   addr + PAGE_SIZE);
	}

	return 0;
}

/**
 * struct clean_walk - Private struct for the clean_record_pte function.
 * @base: struct wp_walk we derive from
 * @bitmap_pgoff: Address_space page offset of the first bit in @bitmap
 * @bitmap: Bitmap with one bit for each page offset in the address_space range
 * covered.
 * @start: Address_space page offset of first modified pte relative
 * to @bitmap_pgoff
 * @end: Address_space page offset of last modified pte + 1 relative
 * to @bitmap_pgoff
 */
struct clean_walk {
	struct wp_walk base;
	pgoff_t bitmap_pgoff;
	unsigned long *bitmap;
	pgoff_t start;
	pgoff_t end;
};

#define to_clean_walk(_wpwalk) container_of(_wpwalk, struct clean_walk, base)

/**
 * clean_record_pte - Clean a pte and record its address space offset in a
 * bitmap
 * @pte: Pointer to the pte
 * @addr: The start of the virtual address range to be cleaned
 * @end: The end of the virtual address range to be cleaned
 * @walk: pagetable walk callback argument
 *
 * The function cleans a pte and records the range in
 * virtual address space of touched ptes for efficient TLB flushes.
 * It also records dirty ptes in a bitmap representing page offsets
 * in the address_space, as well as the first and last of the bits
 * touched.
 */
static int clean_record_pte(pte_t *pte, unsigned long addr,
			    unsigned long end, struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;
	struct clean_walk *cwalk = to_clean_walk(wpwalk);
	pte_t ptent = ptep_get(pte);

	if (pte_dirty(ptent)) {
		pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) +
			walk->vma->vm_pgoff - cwalk->bitmap_pgoff;
		pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

		ptent = pte_mkclean(old_pte);
		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);

		wpwalk->total++;
		wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
		wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
					   addr + PAGE_SIZE);

		__set_bit(pgoff, cwalk->bitmap);
		cwalk->start = min(cwalk->start, pgoff);
		cwalk->end = max(cwalk->end, pgoff + 1);
	}

	return 0;
}

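/*
 * Worked example of the pgoff computation in clean_record_pte(),
 * assuming 4 KiB pages and purely illustrative numbers: with
 * vma->vm_start == 0x7f0000000000, vma->vm_pgoff == 16,
 * addr == 0x7f0000002000 and cwalk->bitmap_pgoff == 16, the pte maps
 * address_space page offset ((addr - vm_start) >> PAGE_SHIFT) + vm_pgoff
 * == 2 + 16 == 18, which lands on bit 18 - 16 == 2 of cwalk->bitmap.
 */
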
/*
 * wp_clean_pmd_entry - The pagewalk pmd callback.
 *
 * Dirty-tracking should take place on the PTE level, so
 * WARN() if encountering a dirty huge pmd.
 * Furthermore, never split huge pmds, since that currently
 * causes dirty info loss. The pagefault handler should do
 * that if needed.
 */
static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	pmd_t pmdval = pmdp_get_lockless(pmd);

	/* Do not split a huge pmd, present or migrated */
	if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval)) {
		WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval));
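		/*
		 * ACTION_CONTINUE makes the pagewalk core skip to the
		 * next entry without splitting this pmd, so its ptes
		 * are not visited.
		 */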
		walk->action = ACTION_CONTINUE;
	}
	return 0;
}

/*
 * wp_clean_pud_entry - The pagewalk pud callback.
 *
 * Dirty-tracking should take place on the PTE level, so
 * WARN() if encountering a dirty huge pud.
 * Furthermore, never split huge puds, since that currently
 * causes dirty info loss. The pagefault handler should do
 * that if needed.
 */
static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	pud_t pudval = READ_ONCE(*pud);

	/* Do not split a huge pud */
	if (pud_trans_huge(pudval) || pud_devmap(pudval)) {
		WARN_ON(pud_write(pudval) || pud_dirty(pudval));
		walk->action = ACTION_CONTINUE;
	}
#endif
	return 0;
}

/*
 * wp_clean_pre_vma - The pagewalk pre_vma callback.
 *
 * The pre_vma callback performs the cache flush, stages the tlb flush
 * and calls the necessary mmu notifiers.
 */
static int wp_clean_pre_vma(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;

	wpwalk->tlbflush_start = end;
	wpwalk->tlbflush_end = start;

	mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0,
				walk->mm, start, end);
	mmu_notifier_invalidate_range_start(&wpwalk->range);
	flush_cache_range(walk->vma, start, end);

	/*
	 * We're not using tlb_gather_mmu() since typically
	 * only a small subrange of PTEs are affected, whereas
	 * tlb_gather_mmu() records the full range.
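	 * The flush staged here is performed by wp_clean_post_vma(),
	 * which pairs dec_tlb_flush_pending() with the increment below.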
	 */
	inc_tlb_flush_pending(walk->mm);

	return 0;
}

/*
 * wp_clean_post_vma - The pagewalk post_vma callback.
 *
 * The post_vma callback performs the tlb flush and calls necessary mmu
 * notifiers.
 */
static void wp_clean_post_vma(struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;

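	/*
	 * If other TLB flushes are pending on this mm, a concurrent
	 * thread may have modified ptes beyond the subrange we
	 * recorded, so conservatively flush the whole walked range.
	 */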
	if (mm_tlb_flush_nested(walk->mm))
		flush_tlb_range(walk->vma, wpwalk->range.start,
				wpwalk->range.end);
	else if (wpwalk->tlbflush_end > wpwalk->tlbflush_start)
		flush_tlb_range(walk->vma, wpwalk->tlbflush_start,
				wpwalk->tlbflush_end);

	mmu_notifier_invalidate_range_end(&wpwalk->range);
	dec_tlb_flush_pending(walk->mm);
}

/*
 * wp_clean_test_walk - The pagewalk test_walk callback.
 *
 * Won't perform dirty-tracking on COW, read-only or HUGETLB vmas.
 */
static int wp_clean_test_walk(unsigned long start, unsigned long end,
			      struct mm_walk *walk)
{
	unsigned long vm_flags = READ_ONCE(walk->vma->vm_flags);

	/* Skip non-applicable VMAs */
	if ((vm_flags & (VM_SHARED | VM_MAYWRITE | VM_HUGETLB)) !=
	    (VM_SHARED | VM_MAYWRITE))
		return 1;

	return 0;
}

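/*
 * The two walk_ops below share every callback except pte_entry:
 * clean_walk_ops cleans and records dirty ptes, while wp_walk_ops
 * write-protects them.
 */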
static const struct mm_walk_ops clean_walk_ops = {
	.pte_entry = clean_record_pte,
	.pmd_entry = wp_clean_pmd_entry,
	.pud_entry = wp_clean_pud_entry,
	.test_walk = wp_clean_test_walk,
	.pre_vma = wp_clean_pre_vma,
	.post_vma = wp_clean_post_vma
};

static const struct mm_walk_ops wp_walk_ops = {
	.pte_entry = wp_pte,
	.pmd_entry = wp_clean_pmd_entry,
	.pud_entry = wp_clean_pud_entry,
	.test_walk = wp_clean_test_walk,
	.pre_vma = wp_clean_pre_vma,
	.post_vma = wp_clean_post_vma
};

/**
 * wp_shared_mapping_range - Write-protect all ptes in an address space range
 * @mapping: The address_space we want to write protect
 * @first_index: The first page offset in the range
 * @nr: Number of incremental page offsets to cover
 *
 * Note: This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge write-enabled entries, though, and can easily be
 * extended to handle them as well.
 *
 * Return: The number of ptes actually write-protected. Note that
 * already write-protected ptes are not counted.
 */
unsigned long wp_shared_mapping_range(struct address_space *mapping,
				      pgoff_t first_index, pgoff_t nr)
{
	struct wp_walk wpwalk = { .total = 0 };

	i_mmap_lock_read(mapping);
	WARN_ON(walk_page_mapping(mapping, first_index, nr, &wp_walk_ops,
				  &wpwalk));
	i_mmap_unlock_read(mapping);

	return wpwalk.total;
}
EXPORT_SYMBOL_GPL(wp_shared_mapping_range);

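/*
 * Usage sketch (hypothetical; @mapping, first and nr stand for whatever
 * range the caller tracks, and are not taken from an in-tree driver):
 *
 *	unsigned long num_wp;
 *
 *	num_wp = wp_shared_mapping_range(mapping, first, nr);
 *	// num_wp ptes were write-enabled and are now write-protected.
 *	// Subsequent writes fault into page_mkwrite()/pfn_mkwrite(),
 *	// where dirty tracking can be re-armed.
 */
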
/**
 * clean_record_shared_mapping_range - Clean and record all ptes in an
 * address space range
 * @mapping: The address_space we want to clean
 * @first_index: The first page offset in the range
 * @nr: Number of incremental page offsets to cover
 * @bitmap_pgoff: The page offset of the first bit in @bitmap
 * @bitmap: Pointer to a bitmap of at least @nr bits. The bitmap needs to
 * cover the whole range @first_index..@first_index + @nr.
 * @start: Pointer to the number of the first set bit in @bitmap.
 * The value is modified as new bits are set by the function.
 * @end: Pointer to the number of the last set bit in @bitmap + 1.
 * *@start >= *@end on entry means no bits are set.
 * The value is modified as new bits are set by the function.
 *
 * When this function returns there is no guarantee that a CPU has
 * not already dirtied new ptes. However it will not clean any ptes not
 * reported in the bitmap. The guarantees are as follows:
 *
 * * All ptes dirty when the function starts executing will end up recorded
 *   in the bitmap.
 * * All ptes dirtied after that will either remain dirty, be recorded in the
 *   bitmap or both.
 *
 * If a caller needs to make sure all dirty ptes are picked up and none
 * additional are added, it first needs to write-protect the address-space
 * range and make sure new writers are blocked in page_mkwrite() or
 * pfn_mkwrite(). And then after a TLB flush following the write-protection
 * pick up all dirty bits.
 *
 * This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge dirty entries, though, and can easily be extended
 * to handle them as well.
 *
 * Return: The number of dirty ptes actually cleaned.
 */
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
						pgoff_t first_index, pgoff_t nr,
						pgoff_t bitmap_pgoff,
						unsigned long *bitmap,
						pgoff_t *start,
						pgoff_t *end)
{
	bool none_set = (*start >= *end);
	struct clean_walk cwalk = {
		.base = { .total = 0 },
		.bitmap_pgoff = bitmap_pgoff,
		.bitmap = bitmap,
		.start = none_set ? nr : *start,
		.end = none_set ? 0 : *end,
	};

	i_mmap_lock_read(mapping);
	WARN_ON(walk_page_mapping(mapping, first_index, nr, &clean_walk_ops,
				  &cwalk.base));
	i_mmap_unlock_read(mapping);

	*start = cwalk.start;
	*end = cwalk.end;

	return cwalk.base.total;
}
EXPORT_SYMBOL_GPL(clean_record_shared_mapping_range);
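
/*
 * Usage sketch (hypothetical; identifiers are illustrative only):
 * collect the dirty page offsets of a tracked range into a
 * caller-owned bitmap, passing first as both the walk start and
 * @bitmap_pgoff so that bit i corresponds to page offset first + i:
 *
 *	unsigned long *bitmap = bitmap_zalloc(nr, GFP_KERNEL);
 *	pgoff_t start = nr, end = 0;	// start >= end: no bits set yet
 *	unsigned long cleaned;
 *
 *	if (!bitmap)
 *		return -ENOMEM;
 *	cleaned = clean_record_shared_mapping_range(mapping, first, nr,
 *						    first, bitmap,
 *						    &start, &end);
 *	// Bits start..end - 1 of bitmap may now be set; each set bit i
 *	// marks page offset first + i as having been dirty and cleaned.
 *	bitmap_free(bitmap);
 */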