// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/bitops.h>
#include <linux/mmu_notifier.h>
#include <linux/mm_inline.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/**
 * struct wp_walk - Private struct for pagetable walk callbacks
 * @range: Range for mmu notifiers
 * @tlbflush_start: Address of first modified pte
 * @tlbflush_end: Address of last modified pte + 1
 * @total: Total number of modified ptes
 */
struct wp_walk {
	struct mmu_notifier_range range;
	unsigned long tlbflush_start;
	unsigned long tlbflush_end;
	unsigned long total;
};

/**
 * wp_pte - Write-protect a pte
 * @pte: Pointer to the pte
 * @addr: The start of the virtual address range to write-protect
 * @end: The end of the virtual address range to write-protect
 * @walk: pagetable walk callback argument
 *
 * The function write-protects a pte and records the range in
 * virtual address space of touched ptes for efficient range TLB flushes.
 */
static int wp_pte(pte_t *pte, unsigned long addr, unsigned long end,
		  struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;
	pte_t ptent = *pte;

	if (pte_write(ptent)) {
		pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

		ptent = pte_wrprotect(old_pte);
		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
		wpwalk->total++;
		wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
		wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
					   addr + PAGE_SIZE);
	}

	return 0;
}
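/*
 * A worked example of the flush-range bookkeeping above (illustrative
 * numbers only, assuming 4 KiB pages): wp_clean_pre_vma() below
 * initializes the pair to the inverted range [end, start). If the walk
 * then write-protects ptes at 0x1000 and 0x5000, tlbflush_start shrinks
 * to 0x1000 and tlbflush_end grows to 0x6000, so wp_clean_post_vma()
 * flushes only the minimal range [0x1000, 0x6000) instead of the whole
 * walked range.
 */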
/**
 * struct clean_walk - Private struct for the clean_record_pte function.
 * @base: struct wp_walk we derive from
 * @bitmap_pgoff: Address_space page offset of the first bit in @bitmap
 * @bitmap: Bitmap with one bit for each page offset in the address_space range
 * covered.
 * @start: Address_space page offset of first modified pte relative
 * to @bitmap_pgoff
 * @end: Address_space page offset of last modified pte + 1, relative
 * to @bitmap_pgoff
 */
struct clean_walk {
	struct wp_walk base;
	pgoff_t bitmap_pgoff;
	unsigned long *bitmap;
	pgoff_t start;
	pgoff_t end;
};

#define to_clean_walk(_wpwalk) container_of(_wpwalk, struct clean_walk, base)

/**
 * clean_record_pte - Clean a pte and record its address space offset in a
 * bitmap
 * @pte: Pointer to the pte
 * @addr: The start of the virtual address range to be cleaned
 * @end: The end of the virtual address range to be cleaned
 * @walk: pagetable walk callback argument
 *
 * The function cleans a pte and records the range in
 * virtual address space of touched ptes for efficient TLB flushes.
 * It also records dirty ptes in a bitmap representing page offsets
 * in the address_space, as well as the first and last of the bits
 * touched.
 */
static int clean_record_pte(pte_t *pte, unsigned long addr,
			    unsigned long end, struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;
	struct clean_walk *cwalk = to_clean_walk(wpwalk);
	pte_t ptent = *pte;

	if (pte_dirty(ptent)) {
		pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) +
			walk->vma->vm_pgoff - cwalk->bitmap_pgoff;
		pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

		ptent = pte_mkclean(old_pte);
		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);

		wpwalk->total++;
		wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
		wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
					   addr + PAGE_SIZE);

		__set_bit(pgoff, cwalk->bitmap);
		cwalk->start = min(cwalk->start, pgoff);
		cwalk->end = max(cwalk->end, pgoff + 1);
	}

	return 0;
}
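/*
 * A worked example of the bitmap offset computation above (illustrative
 * numbers only, assuming 4 KiB pages): for a vma with vm_start =
 * 0x40000000 and vm_pgoff = 16, a dirty pte at addr = 0x40003000 lies
 * at address_space offset (0x3000 >> PAGE_SHIFT) + 16 = 19. With
 * bitmap_pgoff = 16 that becomes bit 19 - 16 = 3 of @bitmap, and
 * cwalk->start / cwalk->end are narrowed to the min / max around it.
 */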
/*
 * wp_clean_pmd_entry - The pagewalk pmd callback.
 *
 * Dirty-tracking should take place on the PTE level, so
 * WARN() if encountering a dirty huge pmd.
 * Furthermore, never split huge pmds, since that currently
 * causes dirty info loss. The pagefault handler should do
 * that if needed.
 */
static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	pmd_t pmdval = pmdp_get_lockless(pmd);

	if (!pmd_trans_unstable(&pmdval))
		return 0;

	if (pmd_none(pmdval)) {
		walk->action = ACTION_AGAIN;
		return 0;
	}

	/* Huge pmd, present or migrated */
	walk->action = ACTION_CONTINUE;
	if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval))
		WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval));

	return 0;
}

/*
 * wp_clean_pud_entry - The pagewalk pud callback.
 *
 * Dirty-tracking should take place on the PTE level, so
 * WARN() if encountering a dirty huge pud.
 * Furthermore, never split huge puds, since that currently
 * causes dirty info loss. The pagefault handler should do
 * that if needed.
 */
static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	pud_t pudval = READ_ONCE(*pud);

	if (!pud_trans_unstable(&pudval))
		return 0;

	if (pud_none(pudval)) {
		walk->action = ACTION_AGAIN;
		return 0;
	}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	/* Huge pud */
	walk->action = ACTION_CONTINUE;
	if (pud_trans_huge(pudval) || pud_devmap(pudval))
		WARN_ON(pud_write(pudval) || pud_dirty(pudval));
#endif

	return 0;
}
/*
 * wp_clean_pre_vma - The pagewalk pre_vma callback.
 *
 * The pre_vma callback performs the cache flush, stages the tlb flush
 * and calls the necessary mmu notifiers.
 */
static int wp_clean_pre_vma(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;

	wpwalk->tlbflush_start = end;
	wpwalk->tlbflush_end = start;

	mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0,
				walk->vma, walk->mm, start, end);
	mmu_notifier_invalidate_range_start(&wpwalk->range);
	flush_cache_range(walk->vma, start, end);

	/*
	 * We're not using tlb_gather_mmu() since typically
	 * only a small subrange of PTEs are affected, whereas
	 * tlb_gather_mmu() records the full range.
	 */
	inc_tlb_flush_pending(walk->mm);

	return 0;
}

/*
 * wp_clean_post_vma - The pagewalk post_vma callback.
 *
 * The post_vma callback performs the tlb flush and calls necessary mmu
 * notifiers.
 */
static void wp_clean_post_vma(struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;

	if (mm_tlb_flush_nested(walk->mm))
		flush_tlb_range(walk->vma, wpwalk->range.start,
				wpwalk->range.end);
	else if (wpwalk->tlbflush_end > wpwalk->tlbflush_start)
		flush_tlb_range(walk->vma, wpwalk->tlbflush_start,
				wpwalk->tlbflush_end);

	mmu_notifier_invalidate_range_end(&wpwalk->range);
	dec_tlb_flush_pending(walk->mm);
}
/*
 * wp_clean_test_walk - The pagewalk test_walk callback.
 *
 * Won't perform dirty-tracking on COW, read-only or HUGETLB vmas.
 */
static int wp_clean_test_walk(unsigned long start, unsigned long end,
			      struct mm_walk *walk)
{
	unsigned long vm_flags = READ_ONCE(walk->vma->vm_flags);

	/* Skip non-applicable VMAs */
	if ((vm_flags & (VM_SHARED | VM_MAYWRITE | VM_HUGETLB)) !=
	    (VM_SHARED | VM_MAYWRITE))
		return 1;

	return 0;
}

static const struct mm_walk_ops clean_walk_ops = {
	.pte_entry = clean_record_pte,
	.pmd_entry = wp_clean_pmd_entry,
	.pud_entry = wp_clean_pud_entry,
	.test_walk = wp_clean_test_walk,
	.pre_vma = wp_clean_pre_vma,
	.post_vma = wp_clean_post_vma
};

static const struct mm_walk_ops wp_walk_ops = {
	.pte_entry = wp_pte,
	.pmd_entry = wp_clean_pmd_entry,
	.pud_entry = wp_clean_pud_entry,
	.test_walk = wp_clean_test_walk,
	.pre_vma = wp_clean_pre_vma,
	.post_vma = wp_clean_post_vma
};
/**
 * wp_shared_mapping_range - Write-protect all ptes in an address space range
 * @mapping: The address_space we want to write protect
 * @first_index: The first page offset in the range
 * @nr: Number of incremental page offsets to cover
 *
 * Note: This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge write-enabled entries, though, and can easily be
 * extended to handle them as well.
 *
 * Return: The number of ptes actually write-protected. Note that
 * already write-protected ptes are not counted.
 */
unsigned long wp_shared_mapping_range(struct address_space *mapping,
				      pgoff_t first_index, pgoff_t nr)
{
	struct wp_walk wpwalk = { .total = 0 };

	i_mmap_lock_read(mapping);
	WARN_ON(walk_page_mapping(mapping, first_index, nr, &wp_walk_ops,
				  &wpwalk));
	i_mmap_unlock_read(mapping);

	return wpwalk.total;
}
EXPORT_SYMBOL_GPL(wp_shared_mapping_range);
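/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a driver arming dirty tracking on a shared, writable mapping would
 * write-protect the ptes covering its buffer so that the next CPU write
 * faults into page_mkwrite() / pfn_mkwrite(). The function name and
 * parameters below are illustrative assumptions.
 */
static unsigned long __maybe_unused
example_arm_dirty_tracking(struct address_space *mapping,
			   pgoff_t first_page, pgoff_t num_pages)
{
	/* Returns the number of ptes actually downgraded to read-only. */
	return wp_shared_mapping_range(mapping, first_page, num_pages);
}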
/**
 * clean_record_shared_mapping_range - Clean and record all ptes in an
 * address space range
 * @mapping: The address_space we want to clean
 * @first_index: The first page offset in the range
 * @nr: Number of incremental page offsets to cover
 * @bitmap_pgoff: The page offset of the first bit in @bitmap
 * @bitmap: Pointer to a bitmap of at least @nr bits. The bitmap needs to
 * cover the whole range @first_index..@first_index + @nr.
 * @start: Pointer to the number of the first set bit in @bitmap.
 * The value is modified as new bits are set by the function.
 * @end: Pointer to the number of the last set bit in @bitmap + 1, or
 * zero if none are set. On entry, *start >= *end means that no bits are
 * regarded as set. The value is modified as new bits are set by the
 * function.
 *
 * Note: When this function returns there is no guarantee that a CPU has
 * not already dirtied new ptes. However it will not clean any ptes not
 * reported in the bitmap. The guarantees are as follows:
 * a) All ptes dirty when the function starts executing will end up recorded
 *    in the bitmap.
 * b) All ptes dirtied after that will either remain dirty, be recorded in the
 *    bitmap or both.
 *
 * If a caller needs to make sure all dirty ptes are picked up and none
 * additional are added, it first needs to write-protect the address-space
 * range and make sure new writers are blocked in page_mkwrite() or
 * pfn_mkwrite(). And then after a TLB flush following the write-protection
 * pick up all dirty bits.
 *
 * This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge dirty entries, though, and can easily be extended
 * to handle them as well.
 *
 * Return: The number of dirty ptes actually cleaned.
 */
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
						pgoff_t first_index, pgoff_t nr,
						pgoff_t bitmap_pgoff,
						unsigned long *bitmap,
						pgoff_t *start,
						pgoff_t *end)
{
	bool none_set = (*start >= *end);
	struct clean_walk cwalk = {
		.base = { .total = 0 },
		.bitmap_pgoff = bitmap_pgoff,
		.bitmap = bitmap,
		.start = none_set ? nr : *start,
		.end = none_set ? 0 : *end,
	};

	i_mmap_lock_read(mapping);
	WARN_ON(walk_page_mapping(mapping, first_index, nr, &clean_walk_ops,
				  &cwalk.base));
	i_mmap_unlock_read(mapping);

	*start = cwalk.start;
	*end = cwalk.end;

	return cwalk.base.total;
}
EXPORT_SYMBOL_GPL(clean_record_shared_mapping_range);
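/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * clean a 64-page region and collect the page offsets that were dirty
 * into a local bitmap. Passing *start >= *end tells the function that
 * no bits are set on entry. The example_* name and the region size are
 * illustrative assumptions.
 */
static unsigned long __maybe_unused
example_collect_dirty(struct address_space *mapping, pgoff_t region_first)
{
	DECLARE_BITMAP(dirty, 64) = { 0 };
	pgoff_t start = 64, end = 0;	/* start >= end: no bits set yet */
	unsigned long cleaned;

	cleaned = clean_record_shared_mapping_range(mapping, region_first,
						    64, region_first, dirty,
						    &start, &end);

	/*
	 * Bits [start, end) of dirty now mark the ptes that were dirty;
	 * a caller would typically iterate them with for_each_set_bit()
	 * and write back the corresponding pages.
	 */
	return cleaned;
}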