/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;	/* The first vma to munmap */
	struct vm_area_struct *prev;	/* vma before the munmap area */
	struct vm_area_struct *next;	/* vma after the munmap area */
	struct list_head *uf;		/* Userfaultfd list_head */
	unsigned long start;		/* Aligned start addr (inclusive) */
	unsigned long end;		/* Aligned end addr (exclusive) */
	unsigned long unmap_start;	/* Unmap PTE start */
	unsigned long unmap_end;	/* Unmap PTE end */
	int vma_count;			/* Number of vmas that will be removed */
	bool unlock;			/* Unlock after the munmap */
	bool clear_ptes;		/* If there are outstanding PTEs to be cleared */
	/* 2 byte hole */
	unsigned long nr_pages;		/* Number of pages being removed */
	unsigned long locked_vm;	/* Number of locked pages */
	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

enum vma_merge_state {
	VMA_MERGE_START,
	VMA_MERGE_ERROR_NOMEM,
	VMA_MERGE_NOMERGE,
	VMA_MERGE_SUCCESS,
};

enum vma_merge_flags {
	VMG_FLAG_DEFAULT = 0,
	/*
	 * If we can expand, simply do so. We know there is nothing to merge
	 * to the right. Does not reset state upon failure to merge. The VMA
	 * iterator is assumed to be positioned at the previous VMA, rather
	 * than at the gap.
	 */
	VMG_FLAG_JUST_EXPAND = 1 << 0,
};

/* Represents a VMA merge operation. */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	pgoff_t pgoff;
	struct vm_area_struct *prev;
	struct vm_area_struct *next;	/* Modified by vma_merge(). */
	struct vm_area_struct *vma;	/* Either a new VMA or the one being modified. */
	unsigned long start;
	unsigned long end;
	unsigned long flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_flags merge_flags;
	enum vma_merge_state state;
};

static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}
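
/*
 * Illustrative usage sketch (not part of this header): callers attempting a
 * merge are expected to distinguish "no merge was possible" from an
 * allocation failure via the state field, typically through vmg_nomem().
 * The locals below (mm, vmi, addr, end, flags, pgoff, vma) are hypothetical;
 * VMG_STATE() and vma_merge_new_range() appear later in this header.
 *
 *	VMG_STATE(vmg, mm, &vmi, addr, end, flags, pgoff);
 *
 *	vma = vma_merge_new_range(&vmg);
 *	if (!vma) {
 *		if (vmg_nomem(&vmg))
 *			return -ENOMEM;		// allocation failed mid-merge
 *		// VMA_MERGE_NOMERGE: fall back to creating a new VMA
 *	}
 */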

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}

#define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.flags = flags_,					\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
		.merge_flags = VMG_FLAG_DEFAULT,			\
	}

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.next = NULL,					\
		.vma = vma_,					\
		.start = start_,				\
		.end = end_,					\
		.flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
		.merge_flags = VMG_FLAG_DEFAULT,		\
	}

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

__must_check int vma_expand(struct vma_merge_struct *vmg);
__must_check int vma_shrink(struct vma_iterator *vmi,
		struct vm_area_struct *vma,
		unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma, bool unreachable);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);
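
/*
 * Illustrative usage sketch (assumptions noted): a typical do_vmi_munmap()
 * caller holds the mmap write lock and completes any deferred userfaultfd
 * unmap work afterwards. VMA_ITERATOR(), LIST_HEAD(), mmap_write_lock()/
 * mmap_write_unlock() and userfaultfd_unmap_complete() come from the core
 * mm/userfaultfd headers; error handling is simplified here.
 *
 *	VMA_ITERATOR(vmi, mm, start);
 *	LIST_HEAD(uf);
 *	int ret;
 *
 *	mmap_write_lock(mm);
 *	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, false);
 *	mmap_write_unlock(mm);
 *	userfaultfd_unmap_complete(mm, &uf);
 */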

/* We are about to modify the VMA's flags. */
__must_check struct vm_area_struct
*vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long new_flags);

/* We are about to modify the VMA's flags and/or anon_name. */
__must_check struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name);

/* We are about to modify the VMA's memory policy. */
__must_check struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol);

/* We are about to modify the VMA's flags and/or uffd context. */
__must_check struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx);

__must_check struct vm_area_struct
*vma_merge_new_range(struct vma_merge_struct *vmg);

__must_check struct vm_area_struct
*vma_merge_extend(struct vma_iterator *vmi,
		  struct vm_area_struct *vma,
		  unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);

void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf);

int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
		 unsigned long addr, unsigned long request, unsigned long flags);

unsigned long unmapped_area(struct vm_unmapped_area_info *info);
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}
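
/*
 * Illustrative sketch (hypothetical surrounding code): a change_protection()
 * style path might use the helper above to decide whether individual PTEs
 * may be upgraded to writable when that cannot be done for the whole
 * mapping. The additional per-PTE checks a real caller needs (soft-dirty,
 * uffd-wp, anon exclusivity, ...) are elided here.
 *
 *	if (vma_wants_manual_pte_write_upgrade(vma) &&
 *	    pte_dirty(pte) && !pte_write(pte))
 *		pte = pte_mkwrite(pte, vma);
 */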

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
				    struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
				  struct vm_area_struct *vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
			vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}
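
/*
 * Illustrative sketch (not part of this header): vma_iter_store() expects
 * maple tree nodes to have been preallocated, so a typical sequence pairs it
 * with vma_iter_config() and vma_iter_prealloc(), write-locking the VMA
 * first. Error handling is simplified; vma_start_write() is the per-VMA
 * locking helper from the core mm headers.
 *
 *	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
 *	if (vma_iter_prealloc(&vmi, vma))
 *		return -ENOMEM;
 *
 *	vma_start_write(vma);
 *	vma_iter_store(&vmi, vma);
 */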

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to the end of the previous
 * VMA or, if there is no previous VMA, to index 0.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind
	 * up at the next VMA again, so we check to ensure there is a previous
	 * VMA to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}

#ifdef CONFIG_64BIT

static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check whether a VMA is sealed against modification.
 * Returns true if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}

bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);

#else

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif

#if defined(CONFIG_STACK_GROWSUP)
int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif

int expand_downwards(struct vm_area_struct *vma, unsigned long address);

int __vm_munmap(unsigned long start, size_t len, bool unlock);

#endif	/* __MM_VMA_H */