// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/bitops.h>
#include <linux/mmu_notifier.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/**
 * struct wp_walk - Private struct for pagetable walk callbacks
 * @range: Range for mmu notifiers
 * @tlbflush_start: Address of first modified pte
 * @tlbflush_end: Address of last modified pte + 1
 * @total: Total number of modified ptes
 */
struct wp_walk {
	struct mmu_notifier_range range;
	unsigned long tlbflush_start;
	unsigned long tlbflush_end;
	unsigned long total;
};

/**
 * wp_pte - Write-protect a pte
 * @pte: Pointer to the pte
 * @addr: The virtual page address
 * @end: End of the virtual address range (unused)
 * @walk: pagetable walk callback argument
 *
 * The function write-protects a pte and records the range in
 * virtual address space of touched ptes for efficient range TLB flushes.
 */
static int wp_pte(pte_t *pte, unsigned long addr, unsigned long end,
		  struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;
	pte_t ptent = *pte;

	if (pte_write(ptent)) {
		pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

		ptent = pte_wrprotect(old_pte);
		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
		wpwalk->total++;
		wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
		wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
					   addr + PAGE_SIZE);
	}

	return 0;
}

/**
 * struct clean_walk - Private struct for the clean_record_pte function.
 * @base: struct wp_walk we derive from
 * @bitmap_pgoff: Address_space page offset of the first bit in @bitmap
 * @bitmap: Bitmap with one bit for each page offset in the address_space range
 * covered.
 * @start: Address_space page offset of first modified pte relative
 * to @bitmap_pgoff
 * @end: Address_space page offset one past the last modified pte,
 * relative to @bitmap_pgoff
 */
struct clean_walk {
	struct wp_walk base;
	pgoff_t bitmap_pgoff;
	unsigned long *bitmap;
	pgoff_t start;
	pgoff_t end;
};

#define to_clean_walk(_wpwalk) container_of(_wpwalk, struct clean_walk, base)

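/*
 * Illustration (not part of this file): the walk callbacks receive a
 * struct wp_walk pointer through walk->private. When that struct is
 * embedded as @base in a struct clean_walk, to_clean_walk() recovers
 * the outer struct via container_of():
 *
 *	struct clean_walk cwalk = { .base = { .total = 0 } };
 *	struct wp_walk *wpwalk = &cwalk.base;
 *
 *	WARN_ON(to_clean_walk(wpwalk) != &cwalk);
 */
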
/**
 * clean_record_pte - Clean a pte and record its address space offset in a
 * bitmap
 * @pte: Pointer to the pte
 * @addr: The virtual page address
 * @end: End of the virtual address range (unused)
 * @walk: pagetable walk callback argument
 *
 * The function cleans a pte and records the range in
 * virtual address space of touched ptes for efficient TLB flushes.
 * It also records dirty ptes in a bitmap representing page offsets
 * in the address_space, as well as the first and last of the bits
 * touched.
 */
static int clean_record_pte(pte_t *pte, unsigned long addr,
			    unsigned long end, struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;
	struct clean_walk *cwalk = to_clean_walk(wpwalk);
	pte_t ptent = *pte;

	if (pte_dirty(ptent)) {
		pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) +
			walk->vma->vm_pgoff - cwalk->bitmap_pgoff;
		pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

		ptent = pte_mkclean(old_pte);
		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);

		wpwalk->total++;
		wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
		wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
					   addr + PAGE_SIZE);

		__set_bit(pgoff, cwalk->bitmap);
		cwalk->start = min(cwalk->start, pgoff);
		cwalk->end = max(cwalk->end, pgoff + 1);
	}

	return 0;
}

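/*
 * Worked example (made-up numbers) for the offset arithmetic in
 * clean_record_pte() above. With
 *
 *	addr - vma->vm_start == 3 * PAGE_SIZE	(pte is 3 pages into the vma)
 *	vma->vm_pgoff == 16			(vma maps file page 16 first)
 *	cwalk->bitmap_pgoff == 10		(bit 0 of bitmap is file page 10)
 *
 * the dirty pte maps file page 3 + 16 == 19, so bit 19 - 10 == 9 is set
 * in the bitmap.
 */
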
/* wp_clean_pmd_entry - The pagewalk pmd callback. */
static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	/* Dirty-tracking should be handled on the pte level */
	pmd_t pmdval = pmd_read_atomic(pmd);

	if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval))
		WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval));

	return 0;
}

/* wp_clean_pud_entry - The pagewalk pud callback. */
static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	/* Dirty-tracking should be handled on the pte level */
	pud_t pudval = READ_ONCE(*pud);

	if (pud_trans_huge(pudval) || pud_devmap(pudval))
		WARN_ON(pud_write(pudval) || pud_dirty(pudval));

	return 0;
}

/*
 * wp_clean_pre_vma - The pagewalk pre_vma callback.
 *
 * The pre_vma callback performs the cache flush, stages the tlb flush
 * and calls the necessary mmu notifiers.
 */
static int wp_clean_pre_vma(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;

	/*
	 * Start with an inverted (empty) flush range; the pte callbacks
	 * shrink-wrap it around the ptes actually modified.
	 */
	wpwalk->tlbflush_start = end;
	wpwalk->tlbflush_end = start;

	mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0,
				walk->vma, walk->mm, start, end);
	mmu_notifier_invalidate_range_start(&wpwalk->range);
	flush_cache_range(walk->vma, start, end);

	/*
	 * We're not using tlb_gather_mmu() since typically
	 * only a small subrange of PTEs are affected, whereas
	 * tlb_gather_mmu() records the full range.
	 */
	inc_tlb_flush_pending(walk->mm);

	return 0;
}

/*
 * wp_clean_post_vma - The pagewalk post_vma callback.
 *
 * The post_vma callback performs the tlb flush and calls necessary mmu
 * notifiers.
 */
static void wp_clean_post_vma(struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;

	if (mm_tlb_flush_nested(walk->mm))
		flush_tlb_range(walk->vma, wpwalk->range.start,
				wpwalk->range.end);
	else if (wpwalk->tlbflush_end > wpwalk->tlbflush_start)
		flush_tlb_range(walk->vma, wpwalk->tlbflush_start,
				wpwalk->tlbflush_end);

	mmu_notifier_invalidate_range_end(&wpwalk->range);
	dec_tlb_flush_pending(walk->mm);
}

/*
 * wp_clean_test_walk - The pagewalk test_walk callback.
 *
 * Won't perform dirty-tracking on COW, read-only or HUGETLB vmas.
 */
static int wp_clean_test_walk(unsigned long start, unsigned long end,
			      struct mm_walk *walk)
{
	unsigned long vm_flags = READ_ONCE(walk->vma->vm_flags);

	/* Skip non-applicable VMAs */
	if ((vm_flags & (VM_SHARED | VM_MAYWRITE | VM_HUGETLB)) !=
	    (VM_SHARED | VM_MAYWRITE))
		return 1;

	return 0;
}

static const struct mm_walk_ops clean_walk_ops = {
	.pte_entry = clean_record_pte,
	.pmd_entry = wp_clean_pmd_entry,
	.pud_entry = wp_clean_pud_entry,
	.test_walk = wp_clean_test_walk,
	.pre_vma = wp_clean_pre_vma,
	.post_vma = wp_clean_post_vma
};

static const struct mm_walk_ops wp_walk_ops = {
	.pte_entry = wp_pte,
	.pmd_entry = wp_clean_pmd_entry,
	.pud_entry = wp_clean_pud_entry,
	.test_walk = wp_clean_test_walk,
	.pre_vma = wp_clean_pre_vma,
	.post_vma = wp_clean_post_vma
};

/**
 * wp_shared_mapping_range - Write-protect all ptes in an address space range
 * @mapping: The address_space we want to write protect
 * @first_index: The first page offset in the range
 * @nr: Number of incremental page offsets to cover
 *
 * Note: This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge write-enabled entries, though, and can easily be
 * extended to handle them as well.
 *
 * Return: The number of ptes actually write-protected. Note that
 * already write-protected ptes are not counted.
 */
unsigned long wp_shared_mapping_range(struct address_space *mapping,
				      pgoff_t first_index, pgoff_t nr)
{
	struct wp_walk wpwalk = { .total = 0 };

	i_mmap_lock_read(mapping);
	WARN_ON(walk_page_mapping(mapping, first_index, nr, &wp_walk_ops,
				  &wpwalk));
	i_mmap_unlock_read(mapping);

	return wpwalk.total;
}
EXPORT_SYMBOL_GPL(wp_shared_mapping_range);

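/*
 * Example use of wp_shared_mapping_range() (a hedged sketch; "mapping",
 * "first" and "npages" are hypothetical caller state, not part of this
 * file). Write-protecting the range means the next CPU write to any of
 * these pages faults into page_mkwrite() or pfn_mkwrite(), which is how
 * a caller can re-arm dirty-tracking:
 *
 *	unsigned long wp_count;
 *
 *	wp_count = wp_shared_mapping_range(mapping, first, npages);
 *	// wp_count ptes were writable and are now write-protected.
 */
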
/**
 * clean_record_shared_mapping_range - Clean and record all ptes in an
 * address space range
 * @mapping: The address_space we want to clean
 * @first_index: The first page offset in the range
 * @nr: Number of incremental page offsets to cover
 * @bitmap_pgoff: The page offset of the first bit in @bitmap
 * @bitmap: Pointer to a bitmap of at least @nr bits. The bitmap needs to
 * cover the whole range @first_index..@first_index + @nr.
 * @start: Pointer to the number of the first set bit in @bitmap.
 * The value is modified as new bits are set by the function.
 * @end: Pointer to one past the number of the last set bit in @bitmap.
 * The bitmap is considered to have no bits set on entry if *start >= *end.
 * The value is modified as new bits are set by the function.
 *
 * Note: When this function returns there is no guarantee that a CPU has
 * not already dirtied new ptes. However it will not clean any ptes not
 * reported in the bitmap. The guarantees are as follows:
 *
 * a) All ptes dirty when the function starts executing will end up recorded
 *    in the bitmap.
 * b) All ptes dirtied after that will either remain dirty, be recorded in the
 *    bitmap or both.
 *
 * If a caller needs to make sure all dirty ptes are picked up and no
 * additional ones are added, it first needs to write-protect the
 * address-space range and make sure new writers are blocked in
 * page_mkwrite() or pfn_mkwrite(). And then after a TLB flush following
 * the write-protection pick up all dirty bits.
 *
 * Note: This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge dirty entries, though, and can easily be extended
 * to handle them as well.
 *
 * Return: The number of dirty ptes actually cleaned.
 */
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
						pgoff_t first_index, pgoff_t nr,
						pgoff_t bitmap_pgoff,
						unsigned long *bitmap,
						pgoff_t *start,
						pgoff_t *end)
{
	bool none_set = (*start >= *end);
	struct clean_walk cwalk = {
		.base = { .total = 0 },
		.bitmap_pgoff = bitmap_pgoff,
		.bitmap = bitmap,
		.start = none_set ? nr : *start,
		.end = none_set ? 0 : *end,
	};

	i_mmap_lock_read(mapping);
	WARN_ON(walk_page_mapping(mapping, first_index, nr, &clean_walk_ops,
				  &cwalk.base));
	i_mmap_unlock_read(mapping);

	*start = cwalk.start;
	*end = cwalk.end;

	return cwalk.base.total;
}
EXPORT_SYMBOL_GPL(clean_record_shared_mapping_range);

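/*
 * Example of the dirty-tracking protocol described in the kernel-doc
 * above. A hedged sketch: "mapping", "first" and NPAGES are hypothetical
 * caller state, not part of this file.
 *
 * Write-protect first, so new writers block in page_mkwrite() or
 * pfn_mkwrite() (the needed TLB flush is performed inside the call):
 *
 *	unsigned long bits[BITS_TO_LONGS(NPAGES)] = {};
 *	pgoff_t start = NPAGES, end = 0;
 *	unsigned long cleaned;
 *
 *	wp_shared_mapping_range(mapping, first, NPAGES);
 *
 * Then pick up every pte that was dirty at write-protect time. On
 * return, "bits" has one bit set per cleaned page offset relative to
 * "first" (here bitmap_pgoff == first_index), and [start, end) brackets
 * the set bits:
 *
 *	cleaned = clean_record_shared_mapping_range(mapping, first, NPAGES,
 *						    first, bits, &start, &end);
 *
 * Initializing start >= end tells the function that the bitmap has no
 * bits set on entry.
 */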