/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;

	bool skip_vma_uprobe :1;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;	/* The first vma to munmap */
	struct vm_area_struct *prev;	/* vma before the munmap area */
	struct vm_area_struct *next;	/* vma after the munmap area */
	struct list_head *uf;		/* Userfaultfd list_head */
	unsigned long start;		/* Aligned start addr (inclusive) */
	unsigned long end;		/* Aligned end addr (exclusive) */
	unsigned long unmap_start;	/* Unmap PTE start */
	unsigned long unmap_end;	/* Unmap PTE end */
	int vma_count;			/* Number of vmas that will be removed */
	bool unlock;			/* Unlock after the munmap */
	bool clear_ptes;		/* If there are outstanding PTEs to be cleared */
	/* 2 byte hole */
	unsigned long nr_pages;		/* Number of pages being removed */
	unsigned long locked_vm;	/* Number of locked pages */
	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

enum vma_merge_state {
	VMA_MERGE_START,
	VMA_MERGE_ERROR_NOMEM,
	VMA_MERGE_NOMERGE,
	VMA_MERGE_SUCCESS,
};

/*
 * Describes a VMA merge operation and is threaded throughout it.
 *
 * Any of the fields may be mutated by the merge operation, so no guarantees
 * are made about the contents of this structure after a merge operation has
 * completed.
 */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	/*
	 * Adjacent VMAs, any of which may be NULL if not present:
	 *
	 * |------|--------|------|
	 * | prev | middle | next |
	 * |------|--------|------|
	 *
	 * middle may not yet exist in the case of a proposed new VMA being
	 * merged, or it may be an existing VMA.
	 *
	 * next may be assigned by the caller.
	 */
	struct vm_area_struct *prev;
	struct vm_area_struct *middle;
	struct vm_area_struct *next;
	/* This is the VMA we ultimately target to become the merged VMA. */
	struct vm_area_struct *target;
	/*
	 * Initially, the start, end, pgoff fields are provided by the caller
	 * and describe the proposed new VMA range, whether modifying an
	 * existing VMA (which will be 'middle'), or adding a new one.
	 *
	 * During the merge process these fields are updated to describe the
	 * new range _including those VMAs which will be merged_.
	 */
	unsigned long start;
	unsigned long end;
	pgoff_t pgoff;

	union {
		/* Temporary while VMA flags are being converted. */
		vm_flags_t vm_flags;
		vma_flags_t vma_flags;
	};
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_state state;

	/*
	 * If copied from (i.e. mremap()'d), this is the VMA from which we are
	 * copying.
	 */
	struct vm_area_struct *copied_from;

	/* Flags which callers can use to modify merge behaviour: */

	/*
	 * If we can expand, simply do so. We know there is nothing to merge to
	 * the right. Does not reset state upon failure to merge. The VMA
	 * iterator is assumed to be positioned at the previous VMA, rather
	 * than at the gap.
	 */
	bool just_expand :1;

	/*
	 * If a merge is possible, but an OOM error occurs, give up and don't
	 * execute the merge, returning NULL.
	 */
	bool give_up_on_oom :1;

	/*
	 * If set, skip uprobe_mmap upon merged vma.
	 */
	bool skip_vma_uprobe :1;

	/* Internal flags set during merge process: */

	/*
	 * Internal flag indicating the merge increases vmg->middle->vm_start
	 * (and thereby, vmg->prev->vm_end).
	 */
	bool __adjust_middle_start :1;
	/*
	 * Internal flag indicating the merge decreases vmg->next->vm_start
	 * (and thereby, vmg->middle->vm_end).
	 */
	bool __adjust_next_start :1;
	/*
	 * Internal flag used during the merge operation to indicate we will
	 * remove vmg->middle.
	 */
	bool __remove_middle :1;
	/*
	 * Internal flag used during the merge operation to indicate we will
	 * remove vmg->next.
	 */
	bool __remove_next :1;
};
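
/*
 * Illustrative sketch (not part of this header): a caller proposing a new
 * range typically fills a struct vma_merge_struct via the VMG_STATE() helper
 * defined below, positions vmg.prev, then asks for a merge and inspects the
 * outcome. The names used in the body (mm, vmi, addr, len, vma_flags, pgoff)
 * are assumed locals of the hypothetical caller, not fields of this API.
 *
 *	VMG_STATE(vmg, mm, &vmi, addr, addr + len, vma_flags, pgoff);
 *	vmg.prev = vma_prev(&vmi);
 *
 *	vma = vma_merge_new_range(&vmg);
 *	if (vma)
 *		return vma;		// expanded/merged into an adjacent VMA
 *	if (vmg_nomem(&vmg))
 *		return ERR_PTR(-ENOMEM);// allocation failure during merge
 *	// otherwise VMA_MERGE_NOMERGE: allocate and insert a new VMA instead
 */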

struct unmap_desc {
	struct ma_state *mas;		/* the maple state pointing to the first vma */
	struct vm_area_struct *first;	/* The first vma */
	unsigned long pg_start;		/* The first pagetable address to free (floor) */
	unsigned long pg_end;		/* The last pagetable address to free (ceiling) */
	unsigned long vma_start;	/* The min vma address */
	unsigned long vma_end;		/* The max vma address */
	unsigned long tree_end;		/* Maximum for the vma tree search */
	unsigned long tree_reset;	/* Where to reset the vma tree walk */
	bool mm_wr_locked;		/* If the mmap write lock is held */
};

/*
 * unmap_all_init() - Initialize unmap_desc to remove all vmas, pointing
 * pg_start and pg_end to a safe location.
 */
static inline void unmap_all_init(struct unmap_desc *unmap,
		struct vma_iterator *vmi, struct vm_area_struct *vma)
{
	unmap->mas = &vmi->mas;
	unmap->first = vma;
	unmap->pg_start = FIRST_USER_ADDRESS;
	unmap->pg_end = USER_PGTABLES_CEILING;
	unmap->vma_start = 0;
	unmap->vma_end = ULONG_MAX;
	unmap->tree_end = ULONG_MAX;
	unmap->tree_reset = vma->vm_end;
	unmap->mm_wr_locked = false;
}

/*
 * unmap_pgtable_init() - Initialize unmap_desc to remove all page tables
 * within the user range.
 *
 * ARM can have mappings outside of vmas.
 * See: e2cdef8c847b4 ("[PATCH] freepgt: free_pgtables from FIRST_USER_ADDRESS")
 *
 * ARM LPAE uses page table mappings beyond the USER_PGTABLES_CEILING.
 * See: CONFIG_ARM_LPAE in arch/arm/include/asm/pgtable.h
 */
static inline void unmap_pgtable_init(struct unmap_desc *unmap,
		struct vma_iterator *vmi)
{
	vma_iter_set(vmi, unmap->tree_reset);
	unmap->vma_start = FIRST_USER_ADDRESS;
	unmap->vma_end = USER_PGTABLES_CEILING;
	unmap->tree_end = USER_PGTABLES_CEILING;
}

#define UNMAP_STATE(name, _vmi, _vma, _vma_start, _vma_end, _prev, _next) \
	struct unmap_desc name = {					\
		.mas = &(_vmi)->mas,					\
		.first = _vma,						\
		.pg_start = _prev ? ((struct vm_area_struct *)_prev)->vm_end : \
				    FIRST_USER_ADDRESS,			\
		.pg_end = _next ? ((struct vm_area_struct *)_next)->vm_start : \
				  USER_PGTABLES_CEILING,		\
		.vma_start = _vma_start,				\
		.vma_end = _vma_end,					\
		.tree_end = _next ? ((struct vm_area_struct *)_next)->vm_start : \
				    USER_PGTABLES_CEILING,		\
		.tree_reset = _vma->vm_end,				\
		.mm_wr_locked = true,					\
	}
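
/*
 * Illustrative sketch (not part of this header): a munmap-style path might
 * describe the region being torn down with UNMAP_STATE() and hand it to
 * unmap_region(), declared later in this header. vmi, vma, start, end, prev
 * and next are assumed locals of the hypothetical caller, with prev/next
 * bounding the page table range that may safely be freed.
 *
 *	UNMAP_STATE(unmap, &vmi, vma, start, end, prev, next);
 *
 *	unmap_region(&unmap);	// zap PTEs and free page tables in the range
 */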

static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
		unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
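
/*
 * Worked example (illustrative): for a VMA with vm_start = 0x7f0000200000 and
 * vm_pgoff = 0x10, the address 0x7f0000203000 lies three pages past vm_start,
 * so vma_pgoff_offset() yields 0x10 + 3 = 0x13 (with 4K pages,
 * PHYS_PFN(addr - vm_start) == (addr - vm_start) >> PAGE_SHIFT).
 */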

#define VMG_STATE(name, mm_, vmi_, start_, end_, vma_flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.vma_flags = vma_flags_,				\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
	}

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.middle = vma_,					\
		.next = NULL,					\
		.start = start_,				\
		.end = end_,					\
		.vm_flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
	}

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

__must_check int vma_expand(struct vma_merge_struct *vmg);
__must_check int vma_shrink(struct vma_iterator *vmi,
		struct vm_area_struct *vma,
		unsigned long start, unsigned long end, pgoff_t pgoff);

static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
		struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	vma_mark_attached(vma);
	return 0;
}

/*
 * Temporary helper function for stacked mmap handlers which specify
 * f_op->mmap() but which might have an underlying file system which
 * implements f_op->mmap_prepare().
 */
static inline void set_vma_from_desc(struct vm_area_struct *vma,
		struct vm_area_desc *desc)
{
	/*
	 * Since we're invoking .mmap_prepare() despite having a partially
	 * established VMA, we must take care to handle setting fields
	 * correctly.
	 */

	/* Mutable fields. Populated with initial state. */
	vma->vm_pgoff = desc->pgoff;
	if (desc->vm_file != vma->vm_file)
		vma_set_file(vma, desc->vm_file);
	vma->flags = desc->vma_flags;
	vma->vm_page_prot = desc->page_prot;

	/* User-defined fields. */
	vma->vm_ops = desc->vm_ops;
	vma->vm_private_data = desc->private_data;
}

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma);
void unmap_region(struct unmap_desc *unmap);

/**
 * vma_modify_flags() - Perform any necessary split/merge in preparation for
 * setting VMA flags to *@vm_flags_ptr in the range @start to @end contained
 * within @vma.
 * @vmi: Valid VMA iterator positioned at @vma.
 * @prev: The VMA immediately prior to @vma, or NULL if @vma is the first.
 * @vma: The VMA containing the range @start to @end to be updated.
 * @start: The start of the range to update. May be offset within @vma.
 * @end: The exclusive end of the range to update. May be offset within @vma.
 * @vm_flags_ptr: A pointer to the VMA flags that the @start to @end range is
 * about to be set to. On merge, this will be updated to include sticky flags.
 *
 * IMPORTANT: The actual modification being requested here is NOT applied;
 * rather, the VMA is perhaps split, perhaps merged to accommodate the change,
 * and the caller is expected to perform the actual modification.
 *
 * In order to account for sticky VMA flags, the @vm_flags_ptr parameter points
 * to the requested flags, which are then updated so that the caller, should
 * they overwrite any existing flags, correctly retains these.
 *
 * Returns: A VMA which contains the range @start to @end ready to have its
 * flags altered to *@vm_flags_ptr.
 */
__must_check struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		vm_flags_t *vm_flags_ptr);
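
/*
 * Illustrative sketch (not part of this header): an mprotect-style caller
 * first requests the split/merge and only then applies the change itself.
 * vmi, prev, vma, start, end and newflags are assumed locals of the
 * hypothetical caller; vm_flags_reset() and vma_set_page_prot() are the
 * generic mm helpers used here purely for illustration.
 *
 *	vma = vma_modify_flags(&vmi, prev, vma, start, end, &newflags);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	// Only now does the caller apply the actual modification.
 *	vm_flags_reset(vma, newflags);
 *	vma_set_page_prot(vma);
 */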

/**
 * vma_modify_name() - Perform any necessary split/merge in preparation for
 * setting the anonymous VMA name to @new_name in the range @start to @end
 * contained within @vma.
 * @vmi: Valid VMA iterator positioned at @vma.
 * @prev: The VMA immediately prior to @vma, or NULL if @vma is the first.
 * @vma: The VMA containing the range @start to @end to be updated.
 * @start: The start of the range to update. May be offset within @vma.
 * @end: The exclusive end of the range to update. May be offset within @vma.
 * @new_name: The anonymous VMA name that the @start to @end range is about to
 * be set to.
 *
 * IMPORTANT: The actual modification being requested here is NOT applied;
 * rather, the VMA is perhaps split, perhaps merged to accommodate the change,
 * and the caller is expected to perform the actual modification.
 *
 * Returns: A VMA which contains the range @start to @end ready to have its
 * anonymous VMA name changed to @new_name.
 */
__must_check struct vm_area_struct *vma_modify_name(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct anon_vma_name *new_name);

/**
 * vma_modify_policy() - Perform any necessary split/merge in preparation for
 * setting the NUMA policy to @new_pol in the range @start to @end contained
 * within @vma.
 * @vmi: Valid VMA iterator positioned at @vma.
 * @prev: The VMA immediately prior to @vma, or NULL if @vma is the first.
 * @vma: The VMA containing the range @start to @end to be updated.
 * @start: The start of the range to update. May be offset within @vma.
 * @end: The exclusive end of the range to update. May be offset within @vma.
 * @new_pol: The NUMA policy that the @start to @end range is about to be set
 * to.
 *
 * IMPORTANT: The actual modification being requested here is NOT applied;
 * rather, the VMA is perhaps split, perhaps merged to accommodate the change,
 * and the caller is expected to perform the actual modification.
 *
 * Returns: A VMA which contains the range @start to @end ready to have its
 * NUMA policy changed to @new_pol.
 */
__must_check struct vm_area_struct *vma_modify_policy(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct mempolicy *new_pol);

/**
 * vma_modify_flags_uffd() - Perform any necessary split/merge in preparation
 * for setting VMA flags to @vm_flags and the UFFD context to @new_ctx in the
 * range @start to @end contained within @vma.
 * @vmi: Valid VMA iterator positioned at @vma.
 * @prev: The VMA immediately prior to @vma, or NULL if @vma is the first.
 * @vma: The VMA containing the range @start to @end to be updated.
 * @start: The start of the range to update. May be offset within @vma.
 * @end: The exclusive end of the range to update. May be offset within @vma.
 * @vm_flags: The VMA flags that the @start to @end range is about to be set
 * to.
 * @new_ctx: The userfaultfd context that the @start to @end range is about to
 * be set to.
 * @give_up_on_oom: If an out of memory condition occurs on merge, simply give
 * up on it and treat the merge as best-effort.
 *
 * IMPORTANT: The actual modification being requested here is NOT applied;
 * rather, the VMA is perhaps split, perhaps merged to accommodate the change,
 * and the caller is expected to perform the actual modification.
 *
 * Returns: A VMA which contains the range @start to @end ready to have its VMA
 * flags changed to @vm_flags and its userfaultfd context changed to @new_ctx.
 */
__must_check struct vm_area_struct *vma_modify_flags_uffd(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, vm_flags_t vm_flags,
		struct vm_userfaultfd_ctx new_ctx, bool give_up_on_oom);

__must_check struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg);

__must_check struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
		struct vm_area_struct *vma, unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);
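
/*
 * Illustrative sketch (not part of this header): the batch helpers amortise
 * file/rmap unlink work when many file-backed VMAs are torn down together.
 * The loop below is a hypothetical caller; vmi, vma and vb are assumed
 * locals.
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for_each_vma(vmi, vma)
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);	// flushes any remaining entries
 */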

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
		unsigned long addr, unsigned long len, pgoff_t pgoff,
		bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf);

int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
		 unsigned long addr, unsigned long request,
		 vma_flags_t vma_flags);

unsigned long unmapped_area(struct vm_unmapped_area_info *info);
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, vm_flags_t vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
		unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

static inline bool is_data_mapping_vma_flags(const vma_flags_t *vma_flags)
{
	const vma_flags_t mask = vma_flags_and(vma_flags,
			VMA_WRITE_BIT, VMA_SHARED_BIT, VMA_STACK_BIT);

	return vma_flags_same(&mask, VMA_WRITE_BIT);
}
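
/*
 * Illustrative sketch (not part of this header): these predicates are meant
 * to drive the per-mm counters (exec_vm, stack_vm, data_vm). A counter-update
 * helper in the style of vm_stat_account() might look roughly like this;
 * treat it as a sketch rather than a copy of the real accounting code.
 *
 *	if (is_exec_mapping(flags))
 *		mm->exec_vm += npages;
 *	else if (is_stack_mapping(flags))
 *		mm->stack_vm += npages;
 *	else if (is_data_mapping(flags))
 *		mm->data_vm += npages;
 */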

static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
		unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
		unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store_overwrite(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	vma_assert_attached(vma);

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
			vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline void vma_iter_store_new(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	vma_mark_attached(vma);
	vma_iter_store_overwrite(vmi, vma);
}

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to the end of the previous
 * VMA, or, if no previous VMA exists, to index 0.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind
	 * up at the next VMA again, so we check to ensure there is a previous
	 * VMA to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}
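
/*
 * Illustrative sketch (not part of this header): inserting a new VMA with a
 * preallocated maple tree node typically pairs vma_iter_prealloc() with
 * vma_iter_store_new(). vmi and vma are assumed locals of the hypothetical
 * caller, with vma->vm_start/vm_end already set.
 *
 *	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
 *	if (vma_iter_prealloc(&vmi, vma))
 *		return -ENOMEM;		// preallocation failed
 *
 *	vma_iter_store_new(&vmi, vma);	// marks the VMA attached and stores it
 */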

#ifdef CONFIG_64BIT
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}
#else
static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return false;
}
#endif

#if defined(CONFIG_STACK_GROWSUP)
int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif

int expand_downwards(struct vm_area_struct *vma, unsigned long address);

int __vm_munmap(unsigned long start, size_t len, bool unlock);

int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma);

/* vma_init.c, shared between CONFIG_MMU and nommu. */
void __init vma_state_init(void);
struct vm_area_struct *vm_area_alloc(struct mm_struct *mm);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig);
void vm_area_free(struct vm_area_struct *vma);

/* vma_exec.c */
#ifdef CONFIG_MMU
int create_init_stack_vma(struct mm_struct *mm, struct vm_area_struct **vmap,
			  unsigned long *top_mem_p);
int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
#endif

#endif	/* __MM_VMA_H */