1 /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 /* 3 * vma.h 4 * 5 * Core VMA manipulation API implemented in vma.c. 6 */ 7 #ifndef __MM_VMA_H 8 #define __MM_VMA_H 9 10 /* 11 * VMA lock generalization 12 */ 13 struct vma_prepare { 14 struct vm_area_struct *vma; 15 struct vm_area_struct *adj_next; 16 struct file *file; 17 struct address_space *mapping; 18 struct anon_vma *anon_vma; 19 struct vm_area_struct *insert; 20 struct vm_area_struct *remove; 21 struct vm_area_struct *remove2; 22 }; 23 24 struct unlink_vma_file_batch { 25 int count; 26 struct vm_area_struct *vmas[8]; 27 }; 28 29 /* 30 * vma munmap operation 31 */ 32 struct vma_munmap_struct { 33 struct vma_iterator *vmi; 34 struct vm_area_struct *vma; /* The first vma to munmap */ 35 struct vm_area_struct *prev; /* vma before the munmap area */ 36 struct vm_area_struct *next; /* vma after the munmap area */ 37 struct list_head *uf; /* Userfaultfd list_head */ 38 unsigned long start; /* Aligned start addr (inclusive) */ 39 unsigned long end; /* Aligned end addr (exclusive) */ 40 unsigned long unmap_start; /* Unmap PTE start */ 41 unsigned long unmap_end; /* Unmap PTE end */ 42 int vma_count; /* Number of vmas that will be removed */ 43 bool unlock; /* Unlock after the munmap */ 44 bool clear_ptes; /* If there are outstanding PTE to be cleared */ 45 /* 2 byte hole */ 46 unsigned long nr_pages; /* Number of pages being removed */ 47 unsigned long locked_vm; /* Number of locked pages */ 48 unsigned long nr_accounted; /* Number of VM_ACCOUNT pages */ 49 unsigned long exec_vm; 50 unsigned long stack_vm; 51 unsigned long data_vm; 52 }; 53 54 enum vma_merge_state { 55 VMA_MERGE_START, 56 VMA_MERGE_ERROR_NOMEM, 57 VMA_MERGE_NOMERGE, 58 VMA_MERGE_SUCCESS, 59 }; 60 61 /* 62 * Describes a VMA merge operation and is threaded throughout it. 63 * 64 * Any of the fields may be mutated by the merge operation, so no guarantees are 65 * made to the contents of this structure after a merge operation has completed. 66 */ 67 struct vma_merge_struct { 68 struct mm_struct *mm; 69 struct vma_iterator *vmi; 70 /* 71 * Adjacent VMAs, any of which may be NULL if not present: 72 * 73 * |------|--------|------| 74 * | prev | middle | next | 75 * |------|--------|------| 76 * 77 * middle may not yet exist in the case of a proposed new VMA being 78 * merged, or it may be an existing VMA. 79 * 80 * next may be assigned by the caller. 81 */ 82 struct vm_area_struct *prev; 83 struct vm_area_struct *middle; 84 struct vm_area_struct *next; 85 /* This is the VMA we ultimately target to become the merged VMA. */ 86 struct vm_area_struct *target; 87 /* 88 * Initially, the start, end, pgoff fields are provided by the caller 89 * and describe the proposed new VMA range, whether modifying an 90 * existing VMA (which will be 'middle'), or adding a new one. 91 * 92 * During the merge process these fields are updated to describe the new 93 * range _including those VMAs which will be merged_. 94 */ 95 unsigned long start; 96 unsigned long end; 97 pgoff_t pgoff; 98 99 unsigned long flags; 100 struct file *file; 101 struct anon_vma *anon_vma; 102 struct mempolicy *policy; 103 struct vm_userfaultfd_ctx uffd_ctx; 104 struct anon_vma_name *anon_name; 105 enum vma_merge_state state; 106 107 /* Flags which callers can use to modify merge behaviour: */ 108 109 /* 110 * If we can expand, simply do so. We know there is nothing to merge to 111 * the right. Does not reset state upon failure to merge. The VMA 112 * iterator is assumed to be positioned at the previous VMA, rather than 113 * at the gap. 114 */ 115 bool just_expand :1; 116 117 /* Internal flags set during merge process: */ 118 119 /* 120 * Internal flag indicating the merge increases vmg->middle->vm_start 121 * (and thereby, vmg->prev->vm_end). 122 */ 123 bool __adjust_middle_start :1; 124 /* 125 * Internal flag indicating the merge decreases vmg->next->vm_start 126 * (and thereby, vmg->middle->vm_end). 127 */ 128 bool __adjust_next_start :1; 129 /* 130 * Internal flag used during the merge operation to indicate we will 131 * remove vmg->middle. 132 */ 133 bool __remove_middle :1; 134 /* 135 * Internal flag used during the merge operationr to indicate we will 136 * remove vmg->next. 137 */ 138 bool __remove_next :1; 139 140 }; 141 142 static inline bool vmg_nomem(struct vma_merge_struct *vmg) 143 { 144 return vmg->state == VMA_MERGE_ERROR_NOMEM; 145 } 146 147 /* Assumes addr >= vma->vm_start. */ 148 static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma, 149 unsigned long addr) 150 { 151 return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start); 152 } 153 154 #define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_) \ 155 struct vma_merge_struct name = { \ 156 .mm = mm_, \ 157 .vmi = vmi_, \ 158 .start = start_, \ 159 .end = end_, \ 160 .flags = flags_, \ 161 .pgoff = pgoff_, \ 162 .state = VMA_MERGE_START, \ 163 } 164 165 #define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_) \ 166 struct vma_merge_struct name = { \ 167 .mm = vma_->vm_mm, \ 168 .vmi = vmi_, \ 169 .prev = prev_, \ 170 .middle = vma_, \ 171 .next = NULL, \ 172 .start = start_, \ 173 .end = end_, \ 174 .flags = vma_->vm_flags, \ 175 .pgoff = vma_pgoff_offset(vma_, start_), \ 176 .file = vma_->vm_file, \ 177 .anon_vma = vma_->anon_vma, \ 178 .policy = vma_policy(vma_), \ 179 .uffd_ctx = vma_->vm_userfaultfd_ctx, \ 180 .anon_name = anon_vma_name(vma_), \ 181 .state = VMA_MERGE_START, \ 182 } 183 184 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE 185 void validate_mm(struct mm_struct *mm); 186 #else 187 #define validate_mm(mm) do { } while (0) 188 #endif 189 190 __must_check int vma_expand(struct vma_merge_struct *vmg); 191 __must_check int vma_shrink(struct vma_iterator *vmi, 192 struct vm_area_struct *vma, 193 unsigned long start, unsigned long end, pgoff_t pgoff); 194 195 static inline int vma_iter_store_gfp(struct vma_iterator *vmi, 196 struct vm_area_struct *vma, gfp_t gfp) 197 198 { 199 if (vmi->mas.status != ma_start && 200 ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start))) 201 vma_iter_invalidate(vmi); 202 203 __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); 204 mas_store_gfp(&vmi->mas, vma, gfp); 205 if (unlikely(mas_is_err(&vmi->mas))) 206 return -ENOMEM; 207 208 return 0; 209 } 210 211 int 212 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, 213 struct mm_struct *mm, unsigned long start, 214 unsigned long end, struct list_head *uf, bool unlock); 215 216 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, 217 unsigned long start, size_t len, struct list_head *uf, 218 bool unlock); 219 220 void remove_vma(struct vm_area_struct *vma, bool unreachable); 221 222 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma, 223 struct vm_area_struct *prev, struct vm_area_struct *next); 224 225 /* We are about to modify the VMA's flags. */ 226 __must_check struct vm_area_struct 227 *vma_modify_flags(struct vma_iterator *vmi, 228 struct vm_area_struct *prev, struct vm_area_struct *vma, 229 unsigned long start, unsigned long end, 230 unsigned long new_flags); 231 232 /* We are about to modify the VMA's flags and/or anon_name. */ 233 __must_check struct vm_area_struct 234 *vma_modify_flags_name(struct vma_iterator *vmi, 235 struct vm_area_struct *prev, 236 struct vm_area_struct *vma, 237 unsigned long start, 238 unsigned long end, 239 unsigned long new_flags, 240 struct anon_vma_name *new_name); 241 242 /* We are about to modify the VMA's memory policy. */ 243 __must_check struct vm_area_struct 244 *vma_modify_policy(struct vma_iterator *vmi, 245 struct vm_area_struct *prev, 246 struct vm_area_struct *vma, 247 unsigned long start, unsigned long end, 248 struct mempolicy *new_pol); 249 250 /* We are about to modify the VMA's flags and/or uffd context. */ 251 __must_check struct vm_area_struct 252 *vma_modify_flags_uffd(struct vma_iterator *vmi, 253 struct vm_area_struct *prev, 254 struct vm_area_struct *vma, 255 unsigned long start, unsigned long end, 256 unsigned long new_flags, 257 struct vm_userfaultfd_ctx new_ctx); 258 259 __must_check struct vm_area_struct 260 *vma_merge_new_range(struct vma_merge_struct *vmg); 261 262 __must_check struct vm_area_struct 263 *vma_merge_extend(struct vma_iterator *vmi, 264 struct vm_area_struct *vma, 265 unsigned long delta); 266 267 void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb); 268 269 void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb); 270 271 void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb, 272 struct vm_area_struct *vma); 273 274 void unlink_file_vma(struct vm_area_struct *vma); 275 276 void vma_link_file(struct vm_area_struct *vma); 277 278 int vma_link(struct mm_struct *mm, struct vm_area_struct *vma); 279 280 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, 281 unsigned long addr, unsigned long len, pgoff_t pgoff, 282 bool *need_rmap_locks); 283 284 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma); 285 286 bool vma_needs_dirty_tracking(struct vm_area_struct *vma); 287 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot); 288 289 int mm_take_all_locks(struct mm_struct *mm); 290 void mm_drop_all_locks(struct mm_struct *mm); 291 292 unsigned long mmap_region(struct file *file, unsigned long addr, 293 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, 294 struct list_head *uf); 295 296 int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma, 297 unsigned long addr, unsigned long request, unsigned long flags); 298 299 unsigned long unmapped_area(struct vm_unmapped_area_info *info); 300 unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info); 301 302 static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma) 303 { 304 /* 305 * We want to check manually if we can change individual PTEs writable 306 * if we can't do that automatically for all PTEs in a mapping. For 307 * private mappings, that's always the case when we have write 308 * permissions as we properly have to handle COW. 309 */ 310 if (vma->vm_flags & VM_SHARED) 311 return vma_wants_writenotify(vma, vma->vm_page_prot); 312 return !!(vma->vm_flags & VM_WRITE); 313 } 314 315 #ifdef CONFIG_MMU 316 static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags) 317 { 318 return pgprot_modify(oldprot, vm_get_page_prot(vm_flags)); 319 } 320 #endif 321 322 static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi, 323 unsigned long min) 324 { 325 return mas_prev(&vmi->mas, min); 326 } 327 328 /* 329 * These three helpers classifies VMAs for virtual memory accounting. 330 */ 331 332 /* 333 * Executable code area - executable, not writable, not stack 334 */ 335 static inline bool is_exec_mapping(vm_flags_t flags) 336 { 337 return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC; 338 } 339 340 /* 341 * Stack area (including shadow stacks) 342 * 343 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous: 344 * do_mmap() forbids all other combinations. 345 */ 346 static inline bool is_stack_mapping(vm_flags_t flags) 347 { 348 return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK); 349 } 350 351 /* 352 * Data area - private, writable, not stack 353 */ 354 static inline bool is_data_mapping(vm_flags_t flags) 355 { 356 return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE; 357 } 358 359 360 static inline void vma_iter_config(struct vma_iterator *vmi, 361 unsigned long index, unsigned long last) 362 { 363 __mas_set_range(&vmi->mas, index, last - 1); 364 } 365 366 static inline void vma_iter_reset(struct vma_iterator *vmi) 367 { 368 mas_reset(&vmi->mas); 369 } 370 371 static inline 372 struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min) 373 { 374 return mas_prev_range(&vmi->mas, min); 375 } 376 377 static inline 378 struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max) 379 { 380 return mas_next_range(&vmi->mas, max); 381 } 382 383 static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min, 384 unsigned long max, unsigned long size) 385 { 386 return mas_empty_area(&vmi->mas, min, max - 1, size); 387 } 388 389 static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min, 390 unsigned long max, unsigned long size) 391 { 392 return mas_empty_area_rev(&vmi->mas, min, max - 1, size); 393 } 394 395 /* 396 * VMA Iterator functions shared between nommu and mmap 397 */ 398 static inline int vma_iter_prealloc(struct vma_iterator *vmi, 399 struct vm_area_struct *vma) 400 { 401 return mas_preallocate(&vmi->mas, vma, GFP_KERNEL); 402 } 403 404 static inline void vma_iter_clear(struct vma_iterator *vmi) 405 { 406 mas_store_prealloc(&vmi->mas, NULL); 407 } 408 409 static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi) 410 { 411 return mas_walk(&vmi->mas); 412 } 413 414 /* Store a VMA with preallocated memory */ 415 static inline void vma_iter_store(struct vma_iterator *vmi, 416 struct vm_area_struct *vma) 417 { 418 419 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) 420 if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start && 421 vmi->mas.index > vma->vm_start)) { 422 pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n", 423 vmi->mas.index, vma->vm_start, vma->vm_start, 424 vma->vm_end, vmi->mas.index, vmi->mas.last); 425 } 426 if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start && 427 vmi->mas.last < vma->vm_start)) { 428 pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n", 429 vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end, 430 vmi->mas.index, vmi->mas.last); 431 } 432 #endif 433 434 if (vmi->mas.status != ma_start && 435 ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start))) 436 vma_iter_invalidate(vmi); 437 438 __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); 439 mas_store_prealloc(&vmi->mas, vma); 440 } 441 442 static inline unsigned long vma_iter_addr(struct vma_iterator *vmi) 443 { 444 return vmi->mas.index; 445 } 446 447 static inline unsigned long vma_iter_end(struct vma_iterator *vmi) 448 { 449 return vmi->mas.last + 1; 450 } 451 452 static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi, 453 unsigned long count) 454 { 455 return mas_expected_entries(&vmi->mas, count); 456 } 457 458 static inline 459 struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi) 460 { 461 return mas_prev_range(&vmi->mas, 0); 462 } 463 464 /* 465 * Retrieve the next VMA and rewind the iterator to end of the previous VMA, or 466 * if no previous VMA, to index 0. 467 */ 468 static inline 469 struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi, 470 struct vm_area_struct **pprev) 471 { 472 struct vm_area_struct *next = vma_next(vmi); 473 struct vm_area_struct *prev = vma_prev(vmi); 474 475 /* 476 * Consider the case where no previous VMA exists. We advance to the 477 * next VMA, skipping any gap, then rewind to the start of the range. 478 * 479 * If we were to unconditionally advance to the next range we'd wind up 480 * at the next VMA again, so we check to ensure there is a previous VMA 481 * to skip over. 482 */ 483 if (prev) 484 vma_iter_next_range(vmi); 485 486 if (pprev) 487 *pprev = prev; 488 489 return next; 490 } 491 492 #ifdef CONFIG_64BIT 493 494 static inline bool vma_is_sealed(struct vm_area_struct *vma) 495 { 496 return (vma->vm_flags & VM_SEALED); 497 } 498 499 /* 500 * check if a vma is sealed for modification. 501 * return true, if modification is allowed. 502 */ 503 static inline bool can_modify_vma(struct vm_area_struct *vma) 504 { 505 if (unlikely(vma_is_sealed(vma))) 506 return false; 507 508 return true; 509 } 510 511 bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior); 512 513 #else 514 515 static inline bool can_modify_vma(struct vm_area_struct *vma) 516 { 517 return true; 518 } 519 520 static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior) 521 { 522 return true; 523 } 524 525 #endif 526 527 #if defined(CONFIG_STACK_GROWSUP) 528 int expand_upwards(struct vm_area_struct *vma, unsigned long address); 529 #endif 530 531 int expand_downwards(struct vm_area_struct *vma, unsigned long address); 532 533 int __vm_munmap(unsigned long start, size_t len, bool unlock); 534 535 #endif /* __MM_VMA_H */ 536