// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/bitops.h>
#include <linux/mmu_notifier.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/**
 * struct wp_walk - Private struct for pagetable walk callbacks
 * @range: Range for mmu notifiers
 * @tlbflush_start: Address of first modified pte
 * @tlbflush_end: Address of last modified pte + 1
 * @total: Total number of modified ptes
 */
struct wp_walk {
	struct mmu_notifier_range range;
	unsigned long tlbflush_start;
	unsigned long tlbflush_end;
	unsigned long total;
};

/**
 * wp_pte - Write-protect a pte
 * @pte: Pointer to the pte
 * @addr: The start of the virtual address range to protect
 * @end: The end of the virtual address range to protect
 * @walk: pagetable walk callback argument
 *
 * The function write-protects a pte and records the range in
 * virtual address space of touched ptes for efficient range TLB flushes.
 */
static int wp_pte(pte_t *pte, unsigned long addr, unsigned long end,
		  struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;
	pte_t ptent = *pte;

	if (pte_write(ptent)) {
		pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

		ptent = pte_wrprotect(old_pte);
		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
		wpwalk->total++;
		wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
		wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
					   addr + PAGE_SIZE);
	}

	return 0;
}
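/*
 * Illustrative example (editor's note, not part of the original file):
 * with 4 KiB pages, write-protecting ptes at addresses 0x1000 and
 * 0x5000 leaves tlbflush_start == 0x1000 and tlbflush_end == 0x6000,
 * so wp_clean_post_vma() below can issue a single flush_tlb_range()
 * over the five-page span instead of one flush per modified pte.
 */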
/**
 * struct clean_walk - Private struct for the clean_record_pte function.
 * @base: struct wp_walk we derive from
 * @bitmap_pgoff: Address_space page offset of the first bit in @bitmap
 * @bitmap: Bitmap with one bit for each page offset in the address_space range
 * covered.
 * @start: Address_space page offset of first modified pte relative
 * to @bitmap_pgoff
 * @end: Address_space page offset of last modified pte relative
 * to @bitmap_pgoff
 */
struct clean_walk {
	struct wp_walk base;
	pgoff_t bitmap_pgoff;
	unsigned long *bitmap;
	pgoff_t start;
	pgoff_t end;
};

#define to_clean_walk(_wpwalk) container_of(_wpwalk, struct clean_walk, base)

/**
 * clean_record_pte - Clean a pte and record its address space offset in a
 * bitmap
 * @pte: Pointer to the pte
 * @addr: The start of the virtual address range to clean
 * @end: The end of the virtual address range to clean
 * @walk: pagetable walk callback argument
 *
 * The function cleans a pte and records the range in
 * virtual address space of touched ptes for efficient TLB flushes.
 * It also records dirty ptes in a bitmap representing page offsets
 * in the address_space, as well as the first and last of the bits
 * touched.
 */
static int clean_record_pte(pte_t *pte, unsigned long addr,
			    unsigned long end, struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;
	struct clean_walk *cwalk = to_clean_walk(wpwalk);
	pte_t ptent = *pte;

	if (pte_dirty(ptent)) {
		pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) +
			walk->vma->vm_pgoff - cwalk->bitmap_pgoff;
		pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

		ptent = pte_mkclean(old_pte);
		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);

		wpwalk->total++;
		wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
		wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
					   addr + PAGE_SIZE);

		__set_bit(pgoff, cwalk->bitmap);
		cwalk->start = min(cwalk->start, pgoff);
		cwalk->end = max(cwalk->end, pgoff + 1);
	}

	return 0;
}
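/*
 * Worked example (editor's note, not part of the original file; the
 * values are hypothetical): with 4 KiB pages, vma->vm_start ==
 * 0x7f0000000000, vma->vm_pgoff == 16 and bitmap_pgoff == 16, a dirty
 * pte at addr == 0x7f0000003000 yields
 *
 *	pgoff = ((addr - vm_start) >> PAGE_SHIFT) + vm_pgoff - bitmap_pgoff
 *	      = 3 + 16 - 16 = 3,
 *
 * so bit 3 is set in the caller's bitmap and [start, end) grows to
 * include it.
 */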
/*
 * wp_clean_pmd_entry - The pagewalk pmd callback.
 *
 * Dirty-tracking should take place on the PTE level, so
 * WARN() if encountering a dirty huge pmd.
 * Furthermore, never split huge pmds, since that currently
 * causes dirty info loss. The pagefault handler should do
 * that if needed.
 */
static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	pmd_t pmdval = pmd_read_atomic(pmd);

	if (!pmd_trans_unstable(&pmdval))
		return 0;

	if (pmd_none(pmdval)) {
		walk->action = ACTION_AGAIN;
		return 0;
	}

	/* Huge pmd, present or migrated */
	walk->action = ACTION_CONTINUE;
	if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval))
		WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval));

	return 0;
}

/*
 * wp_clean_pud_entry - The pagewalk pud callback.
 *
 * Dirty-tracking should take place on the PTE level, so
 * WARN() if encountering a dirty huge pud.
 * Furthermore, never split huge puds, since that currently
 * causes dirty info loss. The pagefault handler should do
 * that if needed.
 */
static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	pud_t pudval = READ_ONCE(*pud);

	if (!pud_trans_unstable(&pudval))
		return 0;

	if (pud_none(pudval)) {
		walk->action = ACTION_AGAIN;
		return 0;
	}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	/* Huge pud */
	walk->action = ACTION_CONTINUE;
	if (pud_trans_huge(pudval) || pud_devmap(pudval))
		WARN_ON(pud_write(pudval) || pud_dirty(pudval));
#endif

	return 0;
}
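/*
 * Editor's note on the action protocol used above (not part of the
 * original file): ACTION_AGAIN makes the pagewalk re-read and retry
 * the same entry (used here when the entry was seen as pmd_none/
 * pud_none under a racing transhuge collapse or split), while
 * ACTION_CONTINUE skips descending to lower levels and moves on to
 * the next entry, which is how huge entries are left unsplit.
 */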
/*
 * wp_clean_pre_vma - The pagewalk pre_vma callback.
 *
 * The pre_vma callback performs the cache flush, stages the tlb flush
 * and calls the necessary mmu notifiers.
 */
static int wp_clean_pre_vma(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;

	wpwalk->tlbflush_start = end;
	wpwalk->tlbflush_end = start;

	mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0,
				walk->vma, walk->mm, start, end);
	mmu_notifier_invalidate_range_start(&wpwalk->range);
	flush_cache_range(walk->vma, start, end);

	/*
	 * We're not using tlb_gather_mmu() since typically
	 * only a small subrange of PTEs are affected, whereas
	 * tlb_gather_mmu() records the full range.
	 */
	inc_tlb_flush_pending(walk->mm);

	return 0;
}

/*
 * wp_clean_post_vma - The pagewalk post_vma callback.
 *
 * The post_vma callback performs the tlb flush and calls necessary mmu
 * notifiers.
 */
static void wp_clean_post_vma(struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;

	if (mm_tlb_flush_nested(walk->mm))
		flush_tlb_range(walk->vma, wpwalk->range.start,
				wpwalk->range.end);
	else if (wpwalk->tlbflush_end > wpwalk->tlbflush_start)
		flush_tlb_range(walk->vma, wpwalk->tlbflush_start,
				wpwalk->tlbflush_end);

	mmu_notifier_invalidate_range_end(&wpwalk->range);
	dec_tlb_flush_pending(walk->mm);
}
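/*
 * Editor's note (not part of the original file): wp_clean_pre_vma()
 * initializes tlbflush_start = end and tlbflush_end = start, i.e. an
 * empty, inverted range, so the min()/max() updates in the pte
 * callbacks shrink-wrap the flush range around the ptes actually
 * modified; tlbflush_end > tlbflush_start above therefore also tells
 * us whether any pte was touched at all. The nested case flushes the
 * whole walked range conservatively, since another context with a
 * flush pending on this mm may be modifying ptes in it concurrently.
 */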
/*
 * wp_clean_test_walk - The pagewalk test_walk callback.
 *
 * Won't perform dirty-tracking on COW, read-only or HUGETLB vmas.
 */
static int wp_clean_test_walk(unsigned long start, unsigned long end,
			      struct mm_walk *walk)
{
	unsigned long vm_flags = READ_ONCE(walk->vma->vm_flags);

	/* Skip non-applicable VMAs */
	if ((vm_flags & (VM_SHARED | VM_MAYWRITE | VM_HUGETLB)) !=
	    (VM_SHARED | VM_MAYWRITE))
		return 1;

	return 0;
}

static const struct mm_walk_ops clean_walk_ops = {
	.pte_entry = clean_record_pte,
	.pmd_entry = wp_clean_pmd_entry,
	.pud_entry = wp_clean_pud_entry,
	.test_walk = wp_clean_test_walk,
	.pre_vma = wp_clean_pre_vma,
	.post_vma = wp_clean_post_vma
};

static const struct mm_walk_ops wp_walk_ops = {
	.pte_entry = wp_pte,
	.pmd_entry = wp_clean_pmd_entry,
	.pud_entry = wp_clean_pud_entry,
	.test_walk = wp_clean_test_walk,
	.pre_vma = wp_clean_pre_vma,
	.post_vma = wp_clean_post_vma
};
/**
 * wp_shared_mapping_range - Write-protect all ptes in an address space range
 * @mapping: The address_space we want to write protect
 * @first_index: The first page offset in the range
 * @nr: Number of incremental page offsets to cover
 *
 * Note: This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge write-enabled entries, though, and can easily be
 * extended to handle them as well.
 *
 * Return: The number of ptes actually write-protected. Note that
 * already write-protected ptes are not counted.
 */
unsigned long wp_shared_mapping_range(struct address_space *mapping,
				      pgoff_t first_index, pgoff_t nr)
{
	struct wp_walk wpwalk = { .total = 0 };

	i_mmap_lock_read(mapping);
	WARN_ON(walk_page_mapping(mapping, first_index, nr, &wp_walk_ops,
				  &wpwalk));
	i_mmap_unlock_read(mapping);

	return wpwalk.total;
}
EXPORT_SYMBOL_GPL(wp_shared_mapping_range);
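/*
 * Usage sketch (editor's note; illustrative only, the helper name is
 * hypothetical): a driver would typically start a dirty-tracking
 * period like this, after which the first write to each page faults
 * into its page_mkwrite() / pfn_mkwrite() handler:
 *
 *	static void example_start_dirty_tracking(struct address_space *mapping,
 *						 pgoff_t offset, pgoff_t npages)
 *	{
 *		unsigned long wp_count;
 *
 *		wp_count = wp_shared_mapping_range(mapping, offset, npages);
 *		pr_debug("write-protected %lu ptes\n", wp_count);
 *	}
 */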
/**
 * clean_record_shared_mapping_range - Clean and record all ptes in an
 * address space range
 * @mapping: The address_space we want to clean
 * @first_index: The first page offset in the range
 * @nr: Number of incremental page offsets to cover
 * @bitmap_pgoff: The page offset of the first bit in @bitmap
 * @bitmap: Pointer to a bitmap of at least @nr bits. The bitmap needs to
 * cover the whole range @first_index..@first_index + @nr.
 * @start: Pointer to the number of the first set bit in @bitmap.
 * The value is modified as new bits are set by the function.
 * @end: Pointer to one past the number of the last set bit in @bitmap.
 * *@start >= *@end on entry means no bits are set. The value is modified
 * as new bits are set by the function.
 *
 * Note: When this function returns there is no guarantee that a CPU has
 * not already dirtied new ptes. However it will not clean any ptes not
 * reported in the bitmap. The guarantees are as follows:
 * a) All ptes dirty when the function starts executing will end up recorded
 *    in the bitmap.
 * b) All ptes dirtied after that will either remain dirty, be recorded in the
 *    bitmap or both.
 *
 * If a caller needs to make sure all dirty ptes are picked up and none
 * additional are added, it first needs to write-protect the address-space
 * range and make sure new writers are blocked in page_mkwrite() or
 * pfn_mkwrite(). And then after a TLB flush following the write-protection
 * pick up all dirty bits.
 *
 * This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge dirty entries, though, and can easily be extended
 * to handle them as well.
 *
 * Return: The number of dirty ptes actually cleaned.
 */
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
						pgoff_t first_index, pgoff_t nr,
						pgoff_t bitmap_pgoff,
						unsigned long *bitmap,
						pgoff_t *start,
						pgoff_t *end)
{
	bool none_set = (*start >= *end);
	struct clean_walk cwalk = {
		.base = { .total = 0 },
		.bitmap_pgoff = bitmap_pgoff,
		.bitmap = bitmap,
		.start = none_set ? nr : *start,
		.end = none_set ? 0 : *end,
	};

	i_mmap_lock_read(mapping);
	WARN_ON(walk_page_mapping(mapping, first_index, nr, &clean_walk_ops,
				  &cwalk.base));
	i_mmap_unlock_read(mapping);

	*start = cwalk.start;
	*end = cwalk.end;

	return cwalk.base.total;
}
EXPORT_SYMBOL_GPL(clean_record_shared_mapping_range);
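/*
 * Usage sketch (editor's note; illustrative only, example_flush_page()
 * and the surrounding variables are hypothetical): collecting the page
 * offsets dirtied since the last call, e.g. to flush them to backing
 * storage. With bitmap_pgoff == offset, bit i corresponds to page
 * offset (offset + i) in the address_space:
 *
 *	pgoff_t start = 0, end = 0;	// start >= end: no bits set yet
 *	unsigned long *bitmap = bitmap_zalloc(npages, GFP_KERNEL);
 *	unsigned long i, cleaned;
 *
 *	if (!bitmap)
 *		return -ENOMEM;
 *	cleaned = clean_record_shared_mapping_range(mapping, offset, npages,
 *						    offset, bitmap,
 *						    &start, &end);
 *	for_each_set_bit(i, bitmap, end)
 *		example_flush_page(offset + i);
 *	bitmap_free(bitmap);
 */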