1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 /* 3 * vma.h 4 * 5 * Core VMA manipulation API implemented in vma.c. 6 */ 7 #ifndef __MM_VMA_H 8 #define __MM_VMA_H 9 10 /* 11 * VMA lock generalization 12 */ 13 struct vma_prepare { 14 struct vm_area_struct *vma; 15 struct vm_area_struct *adj_next; 16 struct file *file; 17 struct address_space *mapping; 18 struct anon_vma *anon_vma; 19 struct vm_area_struct *insert; 20 struct vm_area_struct *remove; 21 struct vm_area_struct *remove2; 22 }; 23 24 struct unlink_vma_file_batch { 25 int count; 26 struct vm_area_struct *vmas[8]; 27 }; 28 29 /* 30 * vma munmap operation 31 */ 32 struct vma_munmap_struct { 33 struct vma_iterator *vmi; 34 struct vm_area_struct *vma; /* The first vma to munmap */ 35 struct vm_area_struct *prev; /* vma before the munmap area */ 36 struct vm_area_struct *next; /* vma after the munmap area */ 37 struct list_head *uf; /* Userfaultfd list_head */ 38 unsigned long start; /* Aligned start addr (inclusive) */ 39 unsigned long end; /* Aligned end addr (exclusive) */ 40 unsigned long unmap_start; /* Unmap PTE start */ 41 unsigned long unmap_end; /* Unmap PTE end */ 42 int vma_count; /* Number of vmas that will be removed */ 43 bool unlock; /* Unlock after the munmap */ 44 bool clear_ptes; /* If there are outstanding PTE to be cleared */ 45 /* 2 byte hole */ 46 unsigned long nr_pages; /* Number of pages being removed */ 47 unsigned long locked_vm; /* Number of locked pages */ 48 unsigned long nr_accounted; /* Number of VM_ACCOUNT pages */ 49 unsigned long exec_vm; 50 unsigned long stack_vm; 51 unsigned long data_vm; 52 }; 53 54 enum vma_merge_state { 55 VMA_MERGE_START, 56 VMA_MERGE_ERROR_NOMEM, 57 VMA_MERGE_NOMERGE, 58 VMA_MERGE_SUCCESS, 59 }; 60 61 /* 62 * Describes a VMA merge operation and is threaded throughout it. 63 * 64 * Any of the fields may be mutated by the merge operation, so no guarantees are 65 * made to the contents of this structure after a merge operation has completed. 66 */ 67 struct vma_merge_struct { 68 struct mm_struct *mm; 69 struct vma_iterator *vmi; 70 /* 71 * Adjacent VMAs, any of which may be NULL if not present: 72 * 73 * |------|--------|------| 74 * | prev | middle | next | 75 * |------|--------|------| 76 * 77 * middle may not yet exist in the case of a proposed new VMA being 78 * merged, or it may be an existing VMA. 79 * 80 * next may be assigned by the caller. 81 */ 82 struct vm_area_struct *prev; 83 struct vm_area_struct *middle; 84 struct vm_area_struct *next; 85 /* This is the VMA we ultimately target to become the merged VMA. */ 86 struct vm_area_struct *target; 87 /* 88 * Initially, the start, end, pgoff fields are provided by the caller 89 * and describe the proposed new VMA range, whether modifying an 90 * existing VMA (which will be 'middle'), or adding a new one. 91 * 92 * During the merge process these fields are updated to describe the new 93 * range _including those VMAs which will be merged_. 94 */ 95 unsigned long start; 96 unsigned long end; 97 pgoff_t pgoff; 98 99 unsigned long flags; 100 struct file *file; 101 struct anon_vma *anon_vma; 102 struct mempolicy *policy; 103 struct vm_userfaultfd_ctx uffd_ctx; 104 struct anon_vma_name *anon_name; 105 enum vma_merge_state state; 106 107 /* Flags which callers can use to modify merge behaviour: */ 108 109 /* 110 * If we can expand, simply do so. We know there is nothing to merge to 111 * the right. Does not reset state upon failure to merge. The VMA 112 * iterator is assumed to be positioned at the previous VMA, rather than 113 * at the gap. 114 */ 115 bool just_expand :1; 116 117 /* Internal flags set during merge process: */ 118 119 /* 120 * Internal flag indicating the merge increases vmg->middle->vm_start 121 * (and thereby, vmg->prev->vm_end). 122 */ 123 bool __adjust_middle_start :1; 124 /* 125 * Internal flag indicating the merge decreases vmg->next->vm_start 126 * (and thereby, vmg->middle->vm_end). 127 */ 128 bool __adjust_next_start :1; 129 /* 130 * Internal flag used during the merge operation to indicate we will 131 * remove vmg->middle. 132 */ 133 bool __remove_middle :1; 134 /* 135 * Internal flag used during the merge operationr to indicate we will 136 * remove vmg->next. 137 */ 138 bool __remove_next :1; 139 140 }; 141 142 static inline bool vmg_nomem(struct vma_merge_struct *vmg) 143 { 144 return vmg->state == VMA_MERGE_ERROR_NOMEM; 145 } 146 147 /* Assumes addr >= vma->vm_start. */ 148 static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma, 149 unsigned long addr) 150 { 151 return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start); 152 } 153 154 #define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_) \ 155 struct vma_merge_struct name = { \ 156 .mm = mm_, \ 157 .vmi = vmi_, \ 158 .start = start_, \ 159 .end = end_, \ 160 .flags = flags_, \ 161 .pgoff = pgoff_, \ 162 .state = VMA_MERGE_START, \ 163 } 164 165 #define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_) \ 166 struct vma_merge_struct name = { \ 167 .mm = vma_->vm_mm, \ 168 .vmi = vmi_, \ 169 .prev = prev_, \ 170 .middle = vma_, \ 171 .next = NULL, \ 172 .start = start_, \ 173 .end = end_, \ 174 .flags = vma_->vm_flags, \ 175 .pgoff = vma_pgoff_offset(vma_, start_), \ 176 .file = vma_->vm_file, \ 177 .anon_vma = vma_->anon_vma, \ 178 .policy = vma_policy(vma_), \ 179 .uffd_ctx = vma_->vm_userfaultfd_ctx, \ 180 .anon_name = anon_vma_name(vma_), \ 181 .state = VMA_MERGE_START, \ 182 } 183 184 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE 185 void validate_mm(struct mm_struct *mm); 186 #else 187 #define validate_mm(mm) do { } while (0) 188 #endif 189 190 __must_check int vma_expand(struct vma_merge_struct *vmg); 191 __must_check int vma_shrink(struct vma_iterator *vmi, 192 struct vm_area_struct *vma, 193 unsigned long start, unsigned long end, pgoff_t pgoff); 194 195 static inline int vma_iter_store_gfp(struct vma_iterator *vmi, 196 struct vm_area_struct *vma, gfp_t gfp) 197 198 { 199 if (vmi->mas.status != ma_start && 200 ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start))) 201 vma_iter_invalidate(vmi); 202 203 __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); 204 mas_store_gfp(&vmi->mas, vma, gfp); 205 if (unlikely(mas_is_err(&vmi->mas))) 206 return -ENOMEM; 207 208 vma_mark_attached(vma); 209 return 0; 210 } 211 212 int 213 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, 214 struct mm_struct *mm, unsigned long start, 215 unsigned long end, struct list_head *uf, bool unlock); 216 217 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, 218 unsigned long start, size_t len, struct list_head *uf, 219 bool unlock); 220 221 void remove_vma(struct vm_area_struct *vma); 222 223 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma, 224 struct vm_area_struct *prev, struct vm_area_struct *next); 225 226 /* We are about to modify the VMA's flags. */ 227 __must_check struct vm_area_struct 228 *vma_modify_flags(struct vma_iterator *vmi, 229 struct vm_area_struct *prev, struct vm_area_struct *vma, 230 unsigned long start, unsigned long end, 231 unsigned long new_flags); 232 233 /* We are about to modify the VMA's flags and/or anon_name. */ 234 __must_check struct vm_area_struct 235 *vma_modify_flags_name(struct vma_iterator *vmi, 236 struct vm_area_struct *prev, 237 struct vm_area_struct *vma, 238 unsigned long start, 239 unsigned long end, 240 unsigned long new_flags, 241 struct anon_vma_name *new_name); 242 243 /* We are about to modify the VMA's memory policy. */ 244 __must_check struct vm_area_struct 245 *vma_modify_policy(struct vma_iterator *vmi, 246 struct vm_area_struct *prev, 247 struct vm_area_struct *vma, 248 unsigned long start, unsigned long end, 249 struct mempolicy *new_pol); 250 251 /* We are about to modify the VMA's flags and/or uffd context. */ 252 __must_check struct vm_area_struct 253 *vma_modify_flags_uffd(struct vma_iterator *vmi, 254 struct vm_area_struct *prev, 255 struct vm_area_struct *vma, 256 unsigned long start, unsigned long end, 257 unsigned long new_flags, 258 struct vm_userfaultfd_ctx new_ctx); 259 260 __must_check struct vm_area_struct 261 *vma_merge_new_range(struct vma_merge_struct *vmg); 262 263 __must_check struct vm_area_struct 264 *vma_merge_extend(struct vma_iterator *vmi, 265 struct vm_area_struct *vma, 266 unsigned long delta); 267 268 void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb); 269 270 void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb); 271 272 void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb, 273 struct vm_area_struct *vma); 274 275 void unlink_file_vma(struct vm_area_struct *vma); 276 277 void vma_link_file(struct vm_area_struct *vma); 278 279 int vma_link(struct mm_struct *mm, struct vm_area_struct *vma); 280 281 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, 282 unsigned long addr, unsigned long len, pgoff_t pgoff, 283 bool *need_rmap_locks); 284 285 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma); 286 287 bool vma_needs_dirty_tracking(struct vm_area_struct *vma); 288 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot); 289 290 int mm_take_all_locks(struct mm_struct *mm); 291 void mm_drop_all_locks(struct mm_struct *mm); 292 293 unsigned long mmap_region(struct file *file, unsigned long addr, 294 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, 295 struct list_head *uf); 296 297 int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma, 298 unsigned long addr, unsigned long request, unsigned long flags); 299 300 unsigned long unmapped_area(struct vm_unmapped_area_info *info); 301 unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info); 302 303 static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma) 304 { 305 /* 306 * We want to check manually if we can change individual PTEs writable 307 * if we can't do that automatically for all PTEs in a mapping. For 308 * private mappings, that's always the case when we have write 309 * permissions as we properly have to handle COW. 310 */ 311 if (vma->vm_flags & VM_SHARED) 312 return vma_wants_writenotify(vma, vma->vm_page_prot); 313 return !!(vma->vm_flags & VM_WRITE); 314 } 315 316 #ifdef CONFIG_MMU 317 static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags) 318 { 319 return pgprot_modify(oldprot, vm_get_page_prot(vm_flags)); 320 } 321 #endif 322 323 static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi, 324 unsigned long min) 325 { 326 return mas_prev(&vmi->mas, min); 327 } 328 329 /* 330 * These three helpers classifies VMAs for virtual memory accounting. 331 */ 332 333 /* 334 * Executable code area - executable, not writable, not stack 335 */ 336 static inline bool is_exec_mapping(vm_flags_t flags) 337 { 338 return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC; 339 } 340 341 /* 342 * Stack area (including shadow stacks) 343 * 344 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous: 345 * do_mmap() forbids all other combinations. 346 */ 347 static inline bool is_stack_mapping(vm_flags_t flags) 348 { 349 return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK); 350 } 351 352 /* 353 * Data area - private, writable, not stack 354 */ 355 static inline bool is_data_mapping(vm_flags_t flags) 356 { 357 return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE; 358 } 359 360 361 static inline void vma_iter_config(struct vma_iterator *vmi, 362 unsigned long index, unsigned long last) 363 { 364 __mas_set_range(&vmi->mas, index, last - 1); 365 } 366 367 static inline void vma_iter_reset(struct vma_iterator *vmi) 368 { 369 mas_reset(&vmi->mas); 370 } 371 372 static inline 373 struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min) 374 { 375 return mas_prev_range(&vmi->mas, min); 376 } 377 378 static inline 379 struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max) 380 { 381 return mas_next_range(&vmi->mas, max); 382 } 383 384 static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min, 385 unsigned long max, unsigned long size) 386 { 387 return mas_empty_area(&vmi->mas, min, max - 1, size); 388 } 389 390 static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min, 391 unsigned long max, unsigned long size) 392 { 393 return mas_empty_area_rev(&vmi->mas, min, max - 1, size); 394 } 395 396 /* 397 * VMA Iterator functions shared between nommu and mmap 398 */ 399 static inline int vma_iter_prealloc(struct vma_iterator *vmi, 400 struct vm_area_struct *vma) 401 { 402 return mas_preallocate(&vmi->mas, vma, GFP_KERNEL); 403 } 404 405 static inline void vma_iter_clear(struct vma_iterator *vmi) 406 { 407 mas_store_prealloc(&vmi->mas, NULL); 408 } 409 410 static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi) 411 { 412 return mas_walk(&vmi->mas); 413 } 414 415 /* Store a VMA with preallocated memory */ 416 static inline void vma_iter_store_overwrite(struct vma_iterator *vmi, 417 struct vm_area_struct *vma) 418 { 419 vma_assert_attached(vma); 420 421 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) 422 if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start && 423 vmi->mas.index > vma->vm_start)) { 424 pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n", 425 vmi->mas.index, vma->vm_start, vma->vm_start, 426 vma->vm_end, vmi->mas.index, vmi->mas.last); 427 } 428 if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start && 429 vmi->mas.last < vma->vm_start)) { 430 pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n", 431 vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end, 432 vmi->mas.index, vmi->mas.last); 433 } 434 #endif 435 436 if (vmi->mas.status != ma_start && 437 ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start))) 438 vma_iter_invalidate(vmi); 439 440 __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); 441 mas_store_prealloc(&vmi->mas, vma); 442 } 443 444 static inline void vma_iter_store_new(struct vma_iterator *vmi, 445 struct vm_area_struct *vma) 446 { 447 vma_mark_attached(vma); 448 vma_iter_store_overwrite(vmi, vma); 449 } 450 451 static inline unsigned long vma_iter_addr(struct vma_iterator *vmi) 452 { 453 return vmi->mas.index; 454 } 455 456 static inline unsigned long vma_iter_end(struct vma_iterator *vmi) 457 { 458 return vmi->mas.last + 1; 459 } 460 461 static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi, 462 unsigned long count) 463 { 464 return mas_expected_entries(&vmi->mas, count); 465 } 466 467 static inline 468 struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi) 469 { 470 return mas_prev_range(&vmi->mas, 0); 471 } 472 473 /* 474 * Retrieve the next VMA and rewind the iterator to end of the previous VMA, or 475 * if no previous VMA, to index 0. 476 */ 477 static inline 478 struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi, 479 struct vm_area_struct **pprev) 480 { 481 struct vm_area_struct *next = vma_next(vmi); 482 struct vm_area_struct *prev = vma_prev(vmi); 483 484 /* 485 * Consider the case where no previous VMA exists. We advance to the 486 * next VMA, skipping any gap, then rewind to the start of the range. 487 * 488 * If we were to unconditionally advance to the next range we'd wind up 489 * at the next VMA again, so we check to ensure there is a previous VMA 490 * to skip over. 491 */ 492 if (prev) 493 vma_iter_next_range(vmi); 494 495 if (pprev) 496 *pprev = prev; 497 498 return next; 499 } 500 501 #ifdef CONFIG_64BIT 502 503 static inline bool vma_is_sealed(struct vm_area_struct *vma) 504 { 505 return (vma->vm_flags & VM_SEALED); 506 } 507 508 /* 509 * check if a vma is sealed for modification. 510 * return true, if modification is allowed. 511 */ 512 static inline bool can_modify_vma(struct vm_area_struct *vma) 513 { 514 if (unlikely(vma_is_sealed(vma))) 515 return false; 516 517 return true; 518 } 519 520 bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior); 521 522 #else 523 524 static inline bool can_modify_vma(struct vm_area_struct *vma) 525 { 526 return true; 527 } 528 529 static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior) 530 { 531 return true; 532 } 533 534 #endif 535 536 #if defined(CONFIG_STACK_GROWSUP) 537 int expand_upwards(struct vm_area_struct *vma, unsigned long address); 538 #endif 539 540 int expand_downwards(struct vm_area_struct *vma, unsigned long address); 541 542 int __vm_munmap(unsigned long start, size_t len, bool unlock); 543 544 #endif /* __MM_VMA_H */ 545