// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * VMA-specific functions.
 */

#include "vma_internal.h"
#include "vma.h"

struct mmap_state {
	struct mm_struct *mm;
	struct vma_iterator *vmi;

	unsigned long addr;
	unsigned long end;
	pgoff_t pgoff;
	unsigned long pglen;
	vm_flags_t vm_flags;
	struct file *file;
	pgprot_t page_prot;

	/* User-defined fields, perhaps updated by .mmap_prepare(). */
	const struct vm_operations_struct *vm_ops;
	void *vm_private_data;

	unsigned long charged;

	struct vm_area_struct *prev;
	struct vm_area_struct *next;

	/* Unmapping state. */
	struct vma_munmap_struct vms;
	struct ma_state mas_detach;
	struct maple_tree mt_detach;

	/* Determine if we can check KSM flags early in mmap() logic. */
	bool check_ksm_early :1;
	/* If we map new, hold the file rmap lock on mapping. */
	bool hold_file_rmap_lock :1;
};

#define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, vm_flags_, file_) \
	struct mmap_state name = {					\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.addr = addr_,						\
		.end = (addr_) + (len_),				\
		.pgoff = pgoff_,					\
		.pglen = PHYS_PFN(len_),				\
		.vm_flags = vm_flags_,					\
		.file = file_,						\
		.page_prot = vm_get_page_prot(vm_flags_),		\
	}

#define VMG_MMAP_STATE(name, map_, vma_)				\
	struct vma_merge_struct name = {				\
		.mm = (map_)->mm,					\
		.vmi = (map_)->vmi,					\
		.start = (map_)->addr,					\
		.end = (map_)->end,					\
		.vm_flags = (map_)->vm_flags,				\
		.pgoff = (map_)->pgoff,					\
		.file = (map_)->file,					\
		.prev = (map_)->prev,					\
		.middle = vma_,						\
		.next = (vma_) ? NULL : (map_)->next,			\
		.state = VMA_MERGE_START,				\
	}
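
/*
 * Usage sketch (illustrative only, not a call site in this section): a
 * typical mmap() path builds the mapping state first and then derives the
 * merge state from it. The locals here are hypothetical:
 *
 *	MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);
 *	VMG_MMAP_STATE(vmg, &map, NULL);
 */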

/* Was this VMA ever forked from a parent, i.e. maybe contains CoW mappings? */
static bool vma_is_fork_child(struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMAs cloned from a
	 * parent, which would otherwise hurt scalability via contention on
	 * the anon_vma root lock.
	 */
	return vma && vma->anon_vma && !list_is_singular(&vma->anon_vma_chain);
}

static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
{
	struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;

	if (!mpol_equal(vmg->policy, vma_policy(vma)))
		return false;
	if ((vma->vm_flags ^ vmg->vm_flags) & ~VM_IGNORE_MERGE)
		return false;
	if (vma->vm_file != vmg->file)
		return false;
	if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
		return false;
	if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
		return false;
	return true;
}

static bool is_mergeable_anon_vma(struct vma_merge_struct *vmg, bool merge_next)
{
	struct vm_area_struct *tgt = merge_next ? vmg->next : vmg->prev;
	struct vm_area_struct *src = vmg->middle; /* existing merge case. */
	struct anon_vma *tgt_anon = tgt->anon_vma;
	struct anon_vma *src_anon = vmg->anon_vma;

	/*
	 * We _can_ have !src, vmg->anon_vma via copy_vma(). In this instance
	 * we will remove the existing VMA's anon_vmas, so there are no
	 * scalability concerns.
	 */
	VM_WARN_ON(src && src_anon != src->anon_vma);

	/* Case 1 - we will dup_anon_vma() from src into tgt. */
	if (!tgt_anon && src_anon) {
		struct vm_area_struct *copied_from = vmg->copied_from;

		if (vma_is_fork_child(src))
			return false;
		if (vma_is_fork_child(copied_from))
			return false;

		return true;
	}
	/* Case 2 - we will simply use tgt's anon_vma. */
	if (tgt_anon && !src_anon)
		return !vma_is_fork_child(tgt);
	/* Case 3 - the anon_vma's are already shared. */
	return src_anon == tgt_anon;
}

/*
 * init_multi_vma_prep() - Initializer for struct vma_prepare
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 * @vmg: The merge state that will be used to determine adjustment and VMA
 *       removal.
 */
static void init_multi_vma_prep(struct vma_prepare *vp,
				struct vm_area_struct *vma,
				struct vma_merge_struct *vmg)
{
	struct vm_area_struct *adjust;
	struct vm_area_struct **remove = &vp->remove;

	memset(vp, 0, sizeof(struct vma_prepare));
	vp->vma = vma;
	vp->anon_vma = vma->anon_vma;

	if (vmg && vmg->__remove_middle) {
		*remove = vmg->middle;
		remove = &vp->remove2;
	}
	if (vmg && vmg->__remove_next)
		*remove = vmg->next;

	if (vmg && vmg->__adjust_middle_start)
		adjust = vmg->middle;
	else if (vmg && vmg->__adjust_next_start)
		adjust = vmg->next;
	else
		adjust = NULL;

	vp->adj_next = adjust;
	if (!vp->anon_vma && adjust)
		vp->anon_vma = adjust->anon_vma;

	VM_WARN_ON(vp->anon_vma && adjust && adjust->anon_vma &&
		   vp->anon_vma != adjust->anon_vma);

	vp->file = vma->vm_file;
	if (vp->file)
		vp->mapping = vma->vm_file->f_mapping;

	if (vmg && vmg->skip_vma_uprobe)
		vp->skip_vma_uprobe = true;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 *
 * We assume the vma may be removed as part of the merge.
 */
static bool can_vma_merge_before(struct vma_merge_struct *vmg)
{
	pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);

	if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
	    is_mergeable_anon_vma(vmg, /* merge_next = */ true)) {
		if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
			return true;
	}

	return false;
}
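
/*
 * Worked example (illustrative, assuming 4KiB pages): a proposed range
 * [0x1000, 0x3000) has pglen == 2, so it can merge before a next VMA
 * starting at 0x3000 only if next->vm_pgoff == vmg->pgoff + 2, i.e. the
 * file offsets continue seamlessly across the boundary.
 */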

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We assume that vma is not removed as part of the merge.
 */
static bool can_vma_merge_after(struct vma_merge_struct *vmg)
{
	if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
	    is_mergeable_anon_vma(vmg, /* merge_next = */ false)) {
		if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
			return true;
	}
	return false;
}

static void __vma_link_file(struct vm_area_struct *vma,
			    struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_allow_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_insert(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
				      struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_lock and by
 * the root anon_vma's mutex.
 */
static void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

static void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}

/*
 * vma_prepare() - Helper function for handling locking VMAs prior to altering
 * @vp: The initialized vma_prepare struct
 */
static void vma_prepare(struct vma_prepare *vp)
{
	if (vp->file) {
		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);

		if (vp->adj_next)
			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
				      vp->adj_next->vm_end);

		i_mmap_lock_write(vp->mapping);
		if (vp->insert && vp->insert->vm_file) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(vp->insert,
					vp->insert->vm_file->f_mapping);
		}
	}

	if (vp->anon_vma) {
		anon_vma_lock_write(vp->anon_vma);
		anon_vma_interval_tree_pre_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
	}

	if (vp->file) {
		flush_dcache_mmap_lock(vp->mapping);
		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
		if (vp->adj_next)
			vma_interval_tree_remove(vp->adj_next,
						 &vp->mapping->i_mmap);
	}
}

/*
 * vma_complete() - Helper function for handling the unlocking after altering
 * VMAs, or for inserting a VMA.
 *
 * @vp: The vma_prepare struct
 * @vmi: The vma iterator
 * @mm: The mm_struct
 */
static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
			 struct mm_struct *mm)
{
	if (vp->file) {
		if (vp->adj_next)
			vma_interval_tree_insert(vp->adj_next,
						 &vp->mapping->i_mmap);
		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
		flush_dcache_mmap_unlock(vp->mapping);
	}

	if (vp->remove && vp->file) {
		__remove_shared_vm_struct(vp->remove, vp->mapping);
		if (vp->remove2)
			__remove_shared_vm_struct(vp->remove2, vp->mapping);
	} else if (vp->insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		vma_iter_store_new(vmi, vp->insert);
		mm->map_count++;
	}

	if (vp->anon_vma) {
		anon_vma_interval_tree_post_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_post_update_vma(vp->adj_next);
		anon_vma_unlock_write(vp->anon_vma);
	}

	if (vp->file) {
		i_mmap_unlock_write(vp->mapping);

		if (!vp->skip_vma_uprobe) {
			uprobe_mmap(vp->vma);

			if (vp->adj_next)
				uprobe_mmap(vp->adj_next);
		}
	}

	if (vp->remove) {
again:
		vma_mark_detached(vp->remove);
		if (vp->file) {
			uprobe_munmap(vp->remove, vp->remove->vm_start,
				      vp->remove->vm_end);
			fput(vp->file);
		}
		if (vp->remove->anon_vma)
			anon_vma_merge(vp->vma, vp->remove);
		mm->map_count--;
		mpol_put(vma_policy(vp->remove));
		if (!vp->remove2)
			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
		vm_area_free(vp->remove);

		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we are removing both mid and next vmas.
		 */
		if (vp->remove2) {
			vp->remove = vp->remove2;
			vp->remove2 = NULL;
			goto again;
		}
	}
	if (vp->insert && vp->file)
		uprobe_mmap(vp->insert);
}

/*
 * init_vma_prep() - Initializer wrapper for vma_prepare struct
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 */
static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
{
	init_multi_vma_prep(vp, vma, NULL);
}
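
/*
 * Illustrative pairing (sketch): callers bracket VMA field updates with
 * vma_prepare()/vma_complete(), as __split_vma() and vma_shrink() below do:
 *
 *	init_vma_prep(&vp, vma);
 *	vma_prepare(&vp);
 *	... update vma->vm_start / vm_end / vm_pgoff ...
 *	vma_complete(&vp, vmi, vma->vm_mm);
 */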

/*
 * Can the proposed VMA be merged with the left (previous) VMA taking into
 * account the start position of the proposed range.
 */
static bool can_vma_merge_left(struct vma_merge_struct *vmg)
{
	return vmg->prev && vmg->prev->vm_end == vmg->start &&
	       can_vma_merge_after(vmg);
}

/*
 * Can the proposed VMA be merged with the right (next) VMA taking into
 * account the end position of the proposed range.
 *
 * In addition, if we can merge with the left VMA, ensure that left and right
 * anon_vma's are also compatible.
 */
static bool can_vma_merge_right(struct vma_merge_struct *vmg,
				bool can_merge_left)
{
	struct vm_area_struct *next = vmg->next;
	struct vm_area_struct *prev;

	if (!next || vmg->end != next->vm_start || !can_vma_merge_before(vmg))
		return false;

	if (!can_merge_left)
		return true;

	/*
	 * If we can merge with prev (left) and next (right), indicating that
	 * each VMA's anon_vma is compatible with the proposed anon_vma, this
	 * does not mean prev and next are compatible with EACH OTHER.
	 *
	 * We therefore check this in addition to mergeability to either side.
	 */
	prev = vmg->prev;
	return !prev->anon_vma || !next->anon_vma ||
	       prev->anon_vma == next->anon_vma;
}

/*
 * Close a vm structure and free it.
 */
void remove_vma(struct vm_area_struct *vma)
{
	might_sleep();
	vma_close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	vm_area_free(vma);
}

/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end);
	mas_set(mas, vma->vm_end);
	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
		      next ? next->vm_start : USER_PGTABLES_CEILING,
		      /* mm_wr_locked = */ true);
	tlb_finish_mmu(&tlb);
}

/*
 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
 * has already been checked or doesn't make sense to fail.
 * VMA Iterator will point to the original VMA.
 */
static __must_check int
__split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
	    unsigned long addr, int new_below)
{
	struct vma_prepare vp;
	struct vm_area_struct *new;
	int err;

	WARN_ON(vma->vm_start >= addr);
	WARN_ON(vma->vm_end <= addr);

	if (vma->vm_ops && vma->vm_ops->may_split) {
		err = vma->vm_ops->may_split(vma, addr);
		if (err)
			return err;
	}

	new = vm_area_dup(vma);
	if (!new)
		return -ENOMEM;

	if (new_below) {
		new->vm_end = addr;
	} else {
		new->vm_start = addr;
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	err = -ENOMEM;
	vma_iter_config(vmi, new->vm_start, new->vm_end);
	if (vma_iter_prealloc(vmi, new))
		goto out_free_vma;

	err = vma_dup_policy(vma, new);
	if (err)
		goto out_free_vmi;

	err = anon_vma_clone(new, vma);
	if (err)
		goto out_free_mpol;

	if (new->vm_file)
		get_file(new->vm_file);

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	vma_start_write(vma);
	vma_start_write(new);

	init_vma_prep(&vp, vma);
	vp.insert = new;
	vma_prepare(&vp);

	/*
	 * Get rid of huge pages and shared page tables straddling the split
	 * boundary.
	 */
	vma_adjust_trans_huge(vma, vma->vm_start, addr, NULL);
	if (is_vm_hugetlb_page(vma))
		hugetlb_split(vma, addr);

	if (new_below) {
		vma->vm_start = addr;
		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
	} else {
		vma->vm_end = addr;
	}

	/* vma_complete stores the new vma */
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);

	/* Success. */
	if (new_below)
		vma_next(vmi);
	else
		vma_prev(vmi);

	return 0;

out_free_mpol:
	mpol_put(vma_policy(new));
out_free_vmi:
	vma_iter_free(vmi);
out_free_vma:
	vm_area_free(new);
	return err;
}
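
/*
 * Example (illustrative, assuming 4KiB pages): splitting [0x1000, 0x5000)
 * at 0x3000 with new_below == 1 allocates the new VMA for [0x1000, 0x3000)
 * while the original keeps [0x3000, 0x5000) with vm_pgoff advanced by 2;
 * with new_below == 0 the new VMA instead covers [0x3000, 0x5000).
 */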

/*
 * Split a vma into two pieces at address 'addr', a new vma is allocated
 * either for the first part or the tail.
 */
static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
		     unsigned long addr, int new_below)
{
	if (vma->vm_mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	return __split_vma(vmi, vma, addr, new_below);
}

/*
 * dup_anon_vma() - Helper function to duplicate anon_vma on VMA merge in the
 * instance that the destination VMA has no anon_vma but the source does.
 *
 * @dst: The destination VMA
 * @src: The source VMA
 * @dup: Pointer to the destination VMA when successful.
 *
 * Returns: 0 on success.
 */
static int dup_anon_vma(struct vm_area_struct *dst,
			struct vm_area_struct *src, struct vm_area_struct **dup)
{
	/*
	 * There are three cases to consider for correctly propagating
	 * anon_vma's on merge.
	 *
	 * The first is trivial - neither VMA has anon_vma, we need not do
	 * anything.
	 *
	 * The second where both have anon_vma is also a no-op, as they must
	 * then be the same, so there is simply nothing to copy.
	 *
	 * Here we cover the third - if the destination VMA has no anon_vma,
	 * that is, it is unfaulted, we need to ensure that the newly merged
	 * range is referenced by the anon_vma's of the source.
	 */
	if (src->anon_vma && !dst->anon_vma) {
		int ret;

		vma_assert_write_locked(dst);
		dst->anon_vma = src->anon_vma;
		ret = anon_vma_clone(dst, src);
		if (ret)
			return ret;

		*dup = dst;
	}

	return 0;
}
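
/*
 * Example (sketch): when merging an unfaulted prev with a faulted middle,
 * dup_anon_vma(prev, middle, &dup) points prev at middle's anon_vma and
 * clones the anon_vma_chain, so rmap can still find every page mapped in
 * the merged range.
 */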

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mt_validate(&mm->mm_mt);
	for_each_vma(vmi, vma) {
#ifdef CONFIG_DEBUG_VM_RB
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;
#endif
		unsigned long vmi_start, vmi_end;
		bool warn = 0;

		vmi_start = vma_iter_addr(&vmi);
		vmi_end = vma_iter_end(&vmi);
		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
			warn = 1;

		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
			warn = 1;

		if (warn) {
			pr_emerg("issue in %s\n", current->comm);
			dump_stack();
			dump_vma(vma);
			pr_emerg("tree range: %px start %lx end %lx\n", vma,
				 vmi_start, vmi_end - 1);
			vma_iter_dump_tree(&vmi);
		}

#ifdef CONFIG_DEBUG_VM_RB
		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}
#endif
		/* Check for an infinite loop */
		if (++i > mm->map_count + 10) {
			i = -1;
			break;
		}
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */

/*
 * Based on the vmg flag indicating whether we need to adjust the vm_start field
 * for the middle or next VMA, we calculate what the range of the newly adjusted
 * VMA ought to be, and set the VMA's range accordingly.
 */
static void vmg_adjust_set_range(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *adjust;
	pgoff_t pgoff;

	if (vmg->__adjust_middle_start) {
		adjust = vmg->middle;
		pgoff = adjust->vm_pgoff + PHYS_PFN(vmg->end - adjust->vm_start);
	} else if (vmg->__adjust_next_start) {
		adjust = vmg->next;
		pgoff = adjust->vm_pgoff - PHYS_PFN(adjust->vm_start - vmg->end);
	} else {
		return;
	}

	vma_set_range(adjust, vmg->end, adjust->vm_end, pgoff);
}

/*
 * Actually perform the VMA merge operation.
 *
 * IMPORTANT: If vmg->give_up_on_oom is set, we guarantee that we will not
 * modify any VMAs or cause inconsistent state should an OOM condition arise.
 *
 * Returns 0 on success, or an error value on failure.
 */
static int commit_merge(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *vma;
	struct vma_prepare vp;

	if (vmg->__adjust_next_start) {
		/* We manipulate middle and adjust next, which is the target. */
		vma = vmg->middle;
		vma_iter_config(vmg->vmi, vmg->end, vmg->next->vm_end);
	} else {
		vma = vmg->target;
		/* Note: vma iterator must be pointing to 'start'. */
		vma_iter_config(vmg->vmi, vmg->start, vmg->end);
	}

	init_multi_vma_prep(&vp, vma, vmg);

	/*
	 * If vmg->give_up_on_oom is set, we're safe, because we don't actually
	 * manipulate any VMAs until we succeed at preallocation.
	 *
	 * Past this point, we will not return an error.
	 */
	if (vma_iter_prealloc(vmg->vmi, vma))
		return -ENOMEM;

	vma_prepare(&vp);
	/*
	 * THP pages may need to do additional splits if we increase
	 * middle->vm_start.
	 */
	vma_adjust_trans_huge(vma, vmg->start, vmg->end,
			      vmg->__adjust_middle_start ? vmg->middle : NULL);
	vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff);
	vmg_adjust_set_range(vmg);
	vma_iter_store_overwrite(vmg->vmi, vmg->target);

	vma_complete(&vp, vmg->vmi, vma->vm_mm);

	return 0;
}
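
/*
 * Flow of commit_merge() (descriptive sketch): preallocate maple tree nodes
 * (the only failure point), then vma_prepare() -> vma_adjust_trans_huge() ->
 * vma_set_range() -> vma_iter_store_overwrite() -> vma_complete(). Nothing
 * is modified until preallocation succeeds.
 */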

/* We can only remove VMAs when merging if they do not have a close hook. */
static bool can_merge_remove_vma(struct vm_area_struct *vma)
{
	return !vma->vm_ops || !vma->vm_ops->close;
}

/*
 * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its
 * attributes modified.
 *
 * @vmg: Describes the modifications being made to a VMA and associated
 *       metadata.
 *
 * When the attributes of a range within a VMA change, it might be possible
 * for immediately adjacent VMAs to be merged into that VMA due to having
 * identical properties.
 *
 * This function checks for the existence of any such mergeable VMAs and updates
 * the maple tree describing the @vmg->middle->vm_mm address space to account
 * for this, as well as any VMAs shrunk/expanded/deleted as a result of this
 * merge.
 *
 * As part of this operation, if a merge occurs, the @vmg object will have its
 * vma, start, end, and pgoff fields modified to execute the merge. Subsequent
 * calls to this function should reset these fields.
 *
 * Returns: The merged VMA if merge succeeds, or NULL otherwise.
 *
 * ASSUMPTIONS:
 * - The caller must assign the VMA to be modified to @vmg->middle.
 * - The caller must have set @vmg->prev to the previous VMA, if there is one.
 * - The caller must not set @vmg->next, as we determine this.
 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
 * - vmi must be positioned within [@vmg->middle->vm_start, @vmg->middle->vm_end).
 */
static __must_check struct vm_area_struct *vma_merge_existing_range(
		struct vma_merge_struct *vmg)
{
	vm_flags_t sticky_flags = vmg->vm_flags & VM_STICKY;
	struct vm_area_struct *middle = vmg->middle;
	struct vm_area_struct *prev = vmg->prev;
	struct vm_area_struct *next;
	struct vm_area_struct *anon_dup = NULL;
	unsigned long start = vmg->start;
	unsigned long end = vmg->end;
	bool left_side = middle && start == middle->vm_start;
	bool right_side = middle && end == middle->vm_end;
	int err = 0;
	bool merge_left, merge_right, merge_both;

	mmap_assert_write_locked(vmg->mm);
	VM_WARN_ON_VMG(!middle, vmg); /* We are modifying a VMA, so caller must specify. */
	VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. */
	VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg);
	VM_WARN_ON_VMG(start >= end, vmg);

	/*
	 * If middle == prev, we may be offset into the VMA. Otherwise the
	 * range must begin at middle->vm_start, and in all cases it must not
	 * extend beyond middle->vm_end.
	 */
	VM_WARN_ON_VMG(middle &&
		       ((middle != prev && vmg->start != middle->vm_start) ||
			vmg->end > middle->vm_end), vmg);
	/* The vmi must be positioned within vmg->middle. */
	VM_WARN_ON_VMG(middle &&
		       !(vma_iter_addr(vmg->vmi) >= middle->vm_start &&
			 vma_iter_addr(vmg->vmi) < middle->vm_end), vmg);
	/* An existing merge can never be used by the mremap() logic. */
	VM_WARN_ON_VMG(vmg->copied_from, vmg);

	vmg->state = VMA_MERGE_NOMERGE;

	/*
	 * If a special mapping or if the range being modified is neither at the
	 * furthermost left or right side of the VMA, then we have no chance of
	 * merging and should abort.
	 */
	if (vmg->vm_flags & VM_SPECIAL || (!left_side && !right_side))
		return NULL;

	if (left_side)
		merge_left = can_vma_merge_left(vmg);
	else
		merge_left = false;

	if (right_side) {
		next = vmg->next = vma_iter_next_range(vmg->vmi);
		vma_iter_prev_range(vmg->vmi);

		merge_right = can_vma_merge_right(vmg, merge_left);
	} else {
		merge_right = false;
		next = NULL;
	}

	if (merge_left)		/* If merging prev, position iterator there. */
		vma_prev(vmg->vmi);
	else if (!merge_right)	/* If we have nothing to merge, abort. */
		return NULL;

	merge_both = merge_left && merge_right;
	/* If we span the entire VMA, a merge implies it will be deleted. */
	vmg->__remove_middle = left_side && right_side;

	/*
	 * If we need to remove middle in its entirety but are unable to do so,
	 * we have no sensible recourse but to abort the merge.
	 */
	if (vmg->__remove_middle && !can_merge_remove_vma(middle))
		return NULL;

	/*
	 * If we merge both VMAs, then next is also deleted. This implies that
	 * __remove_middle is set too.
	 */
	vmg->__remove_next = merge_both;

	/*
	 * If we cannot delete next, then we can reduce the operation to merging
	 * prev and middle (thereby deleting middle).
	 */
	if (vmg->__remove_next && !can_merge_remove_vma(next)) {
		vmg->__remove_next = false;
		merge_right = false;
		merge_both = false;
	}

	/* No matter what happens, we will be adjusting middle. */
	vma_start_write(middle);

	if (merge_right) {
		vma_start_write(next);
		vmg->target = next;
		sticky_flags |= (next->vm_flags & VM_STICKY);
	}

	if (merge_left) {
		vma_start_write(prev);
		vmg->target = prev;
		sticky_flags |= (prev->vm_flags & VM_STICKY);
	}

	if (merge_both) {
		/*
		 * |<-------------------->|
		 * |-------********-------|
		 *   prev    middle   next
		 *  extend   delete  delete
		 */

		vmg->start = prev->vm_start;
		vmg->end = next->vm_end;
		vmg->pgoff = prev->vm_pgoff;

		/*
		 * We already ensured anon_vma compatibility above, so now it's
		 * simply a case of, if prev has no anon_vma object, which of
		 * next or middle contains the anon_vma we must duplicate.
		 */
		err = dup_anon_vma(prev, next->anon_vma ? next : middle,
				   &anon_dup);
	} else if (merge_left) {
		/*
		 * |<------------>|     OR
		 * |<----------------->|
		 * |-------*************
		 *   prev      middle
		 *  extend shrink/delete
		 */

		vmg->start = prev->vm_start;
		vmg->pgoff = prev->vm_pgoff;

		if (!vmg->__remove_middle)
			vmg->__adjust_middle_start = true;

		err = dup_anon_vma(prev, middle, &anon_dup);
	} else { /* merge_right */
		/*
		 *     |<------------->| OR
		 * |<----------------->|
		 * *************-------|
		 *     middle     next
		 * shrink/delete extend
		 */

		pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);

		VM_WARN_ON_VMG(!merge_right, vmg);
		/* If we are offset into a VMA, then prev must be middle. */
		VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg);

		if (vmg->__remove_middle) {
			vmg->end = next->vm_end;
			vmg->pgoff = next->vm_pgoff - pglen;
		} else {
			/* We shrink middle and expand next. */
			vmg->__adjust_next_start = true;
			vmg->start = middle->vm_start;
			vmg->end = start;
			vmg->pgoff = middle->vm_pgoff;
		}

		err = dup_anon_vma(next, middle, &anon_dup);
	}

	if (err || commit_merge(vmg))
		goto abort;

	vm_flags_set(vmg->target, sticky_flags);
	khugepaged_enter_vma(vmg->target, vmg->vm_flags);
	vmg->state = VMA_MERGE_SUCCESS;
	return vmg->target;

abort:
	vma_iter_set(vmg->vmi, start);
	vma_iter_load(vmg->vmi);

	if (anon_dup)
		unlink_anon_vmas(anon_dup);

	/*
	 * This means we have failed to clone anon_vmas correctly, but no
	 * actual changes to VMAs have occurred, so no harm no foul - if the
	 * user doesn't want this reported and instead just wants to give up on
	 * the merge, allow it.
	 */
	if (!vmg->give_up_on_oom)
		vmg->state = VMA_MERGE_ERROR_NOMEM;
	return NULL;
}
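
/*
 * Typical caller (sketch): vma_modify() below sets vmg->middle and vmg->prev
 * for an mprotect()-style update, attempts this merge, and only falls back
 * to split_vma() when no merge is possible for a range that does not span
 * the whole VMA.
 */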

/*
 * vma_merge_new_range - Attempt to merge a new VMA into address space
 *
 * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
 *       (exclusive), which we try to merge with any adjacent VMAs if possible.
 *
 * We are about to add a VMA to the address space starting at @vmg->start and
 * ending at @vmg->end. There are three different possible scenarios:
 *
 * 1. There is a VMA with identical properties immediately adjacent to the
 *    proposed new VMA [@vmg->start, @vmg->end) either before or after it -
 *    EXPAND that VMA:
 *
 * Proposed:       |-----|  or  |-----|
 * Existing:  |----|                  |----|
 *
 * 2. There are VMAs with identical properties immediately adjacent to the
 *    proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
 *    EXPAND the former and REMOVE the latter:
 *
 * Proposed:       |-----|
 * Existing:  |----|     |----|
 *
 * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
 *    VMAs do not have identical attributes - NO MERGE POSSIBLE.
 *
 * In instances where we can merge, this function returns the expanded VMA which
 * will have its range adjusted accordingly and the underlying maple tree also
 * adjusted.
 *
 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
 *          to the VMA we expanded.
 *
 * This function adjusts @vmg to provide @vmg->next if not already specified,
 * and adjusts [@vmg->start, @vmg->end) to span the expanded range.
 *
 * ASSUMPTIONS:
 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
 * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
 *   other than VMAs that will be unmapped should the operation succeed.
 * - The caller must have specified the previous vma in @vmg->prev.
 * - The caller must have specified the next vma in @vmg->next.
 * - The caller must have positioned the vmi at or before the gap.
 */
struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *prev = vmg->prev;
	struct vm_area_struct *next = vmg->next;
	unsigned long end = vmg->end;
	bool can_merge_left, can_merge_right;

	mmap_assert_write_locked(vmg->mm);
	VM_WARN_ON_VMG(vmg->middle, vmg);
	VM_WARN_ON_VMG(vmg->target, vmg);
	/* vmi must point at or before the gap. */
	VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg);

	vmg->state = VMA_MERGE_NOMERGE;

	/* Special VMAs are unmergeable, also if no prev/next. */
	if ((vmg->vm_flags & VM_SPECIAL) || (!prev && !next))
		return NULL;

	can_merge_left = can_vma_merge_left(vmg);
	can_merge_right = !vmg->just_expand && can_vma_merge_right(vmg, can_merge_left);

	/* If we can merge with the next VMA, adjust vmg accordingly. */
	if (can_merge_right) {
		vmg->end = next->vm_end;
		vmg->target = next;
	}

	/* If we can merge with the previous VMA, adjust vmg accordingly. */
	if (can_merge_left) {
		vmg->start = prev->vm_start;
		vmg->target = prev;
		vmg->pgoff = prev->vm_pgoff;

		/*
		 * If this merge would result in removal of the next VMA but we
		 * are not permitted to do so, reduce the operation to merging
		 * prev and vma.
		 */
		if (can_merge_right && !can_merge_remove_vma(next))
			vmg->end = end;

		/* In expand-only case we are already positioned at prev. */
		if (!vmg->just_expand) {
			/* Equivalent to going to the previous range. */
			vma_prev(vmg->vmi);
		}
	}

	/*
	 * Now try to expand adjacent VMA(s). This takes care of removing the
	 * following VMA if we have VMAs on both sides.
	 */
	if (vmg->target && !vma_expand(vmg)) {
		khugepaged_enter_vma(vmg->target, vmg->vm_flags);
		vmg->state = VMA_MERGE_SUCCESS;
		return vmg->target;
	}

	return NULL;
}

/*
 * vma_merge_copied_range - Attempt to merge a VMA that is being copied by
 * mremap()
 *
 * @vmg: Describes the VMA we are adding, in the copied-to range @vmg->start to
 *       @vmg->end (exclusive), which we try to merge with any adjacent VMAs if
 *       possible.
 *
 * vmg->prev, next, start, end, pgoff should all be relative to the COPIED TO
 * range, i.e. the target range for the VMA.
 *
 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
 *          to the VMA we expanded.
 *
 * ASSUMPTIONS: Same as vma_merge_new_range(), except vmg->middle must contain
 * the copied-from VMA.
 */
static struct vm_area_struct *vma_merge_copied_range(struct vma_merge_struct *vmg)
{
	/* We must have a copied-from VMA. */
	VM_WARN_ON_VMG(!vmg->middle, vmg);

	vmg->copied_from = vmg->middle;
	vmg->middle = NULL;
	return vma_merge_new_range(vmg);
}
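
/*
 * Caller (sketch): copy_vma() below uses this for mremap() moves. The vmg
 * start/end/pgoff/prev/next fields describe the destination range, while
 * the source VMA is carried in vmg->copied_from so anon_vma compatibility
 * checks can account for it.
 */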

/*
 * vma_expand - Expand an existing VMA
 *
 * @vmg: Describes a VMA expansion operation.
 *
 * Expand @vma to vmg->start and vmg->end. Can expand off the start and end.
 * Will expand over vmg->next if it's different from vmg->target and vmg->end ==
 * vmg->next->vm_end. Checking if the vmg->target can expand and merge with
 * vmg->next needs to be handled by the caller.
 *
 * Returns: 0 on success.
 *
 * ASSUMPTIONS:
 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
 * - The caller must have set @vmg->target and @vmg->next.
 */
int vma_expand(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *anon_dup = NULL;
	struct vm_area_struct *target = vmg->target;
	struct vm_area_struct *next = vmg->next;
	bool remove_next = false;
	vm_flags_t sticky_flags;
	int ret = 0;

	mmap_assert_write_locked(vmg->mm);
	vma_start_write(target);

	if (next && target != next && vmg->end == next->vm_end)
		remove_next = true;

	/* We must have a target. */
	VM_WARN_ON_VMG(!target, vmg);
	/* This should have already been checked by this point. */
	VM_WARN_ON_VMG(remove_next && !can_merge_remove_vma(next), vmg);
	/* Not merging but overwriting any part of next is not handled. */
	VM_WARN_ON_VMG(next && !remove_next &&
		       next != target && vmg->end > next->vm_start, vmg);
	/* Only handles expanding. */
	VM_WARN_ON_VMG(target->vm_start < vmg->start ||
		       target->vm_end > vmg->end, vmg);

	sticky_flags = vmg->vm_flags & VM_STICKY;
	sticky_flags |= target->vm_flags & VM_STICKY;
	if (remove_next)
		sticky_flags |= next->vm_flags & VM_STICKY;

	/*
	 * If we are removing the next VMA or copying from a VMA
	 * (e.g. mremap()'ing), we must propagate anon_vma state.
	 *
	 * Note that, by convention, callers ignore OOM for this case, so
	 * we don't need to account for vmg->give_up_on_oom here.
	 */
	if (remove_next)
		ret = dup_anon_vma(target, next, &anon_dup);
	if (!ret && vmg->copied_from)
		ret = dup_anon_vma(target, vmg->copied_from, &anon_dup);
	if (ret)
		return ret;

	if (remove_next) {
		vma_start_write(next);
		vmg->__remove_next = true;
	}
	if (commit_merge(vmg))
		goto nomem;

	vm_flags_set(target, sticky_flags);
	return 0;

nomem:
	if (anon_dup)
		unlink_anon_vmas(anon_dup);
	/*
	 * If the user requests that we just give up on OOM, we are safe to do
	 * so here, as commit_merge() provides this contract to us. Nothing has
	 * been changed - no harm no foul, just don't report it.
	 */
	if (!vmg->give_up_on_oom)
		vmg->state = VMA_MERGE_ERROR_NOMEM;
	return -ENOMEM;
}

/*
 * vma_shrink() - Reduce an existing VMA's memory area
 * @vmi: The vma iterator
 * @vma: The VMA to modify
 * @start: The new start
 * @end: The new end
 * @pgoff: The new page offset of the VMA
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff)
{
	struct vma_prepare vp;

	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));

	if (vma->vm_start < start)
		vma_iter_config(vmi, vma->vm_start, start);
	else
		vma_iter_config(vmi, end, vma->vm_end);

	if (vma_iter_prealloc(vmi, NULL))
		return -ENOMEM;

	vma_start_write(vma);

	init_vma_prep(&vp, vma);
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, start, end, NULL);

	vma_iter_clear(vmi);
	vma_set_range(vma, start, end, pgoff);
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);
	return 0;
}
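
/*
 * Example (illustrative, assuming 4KiB pages): shrinking [0x1000, 0x5000)
 * to [0x3000, 0x5000) requires the caller to pass pgoff advanced by 2,
 * whereas shrinking to [0x1000, 0x3000) keeps the original pgoff.
 */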

static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach, bool mm_wr_locked)
{
	struct mmu_gather tlb;

	if (!vms->clear_ptes) /* Nothing to do */
		return;

	/*
	 * We can free page tables without write-locking mmap_lock because VMAs
	 * were isolated before we downgraded mmap_lock.
	 */
	mas_set(mas_detach, 1);
	tlb_gather_mmu(&tlb, vms->vma->vm_mm);
	update_hiwater_rss(vms->vma->vm_mm);
	unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
		   vms->vma_count);

	mas_set(mas_detach, 1);
	/* start and end may be different if there is no prev or next vma. */
	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
		      vms->unmap_end, mm_wr_locked);
	tlb_finish_mmu(&tlb);
	vms->clear_ptes = false;
}

static void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	if (!vms->nr_pages)
		return;

	vms_clear_ptes(vms, mas_detach, true);
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_close(vma);
}

/*
 * vms_complete_munmap_vmas() - Finish the munmap() operation
 * @vms: The vma munmap struct
 * @mas_detach: The maple state of the detached vmas
 *
 * This updates the mm_struct, unmaps the region, frees the resources used for
 * the munmap() and may downgrade the lock - if requested. Everything here
 * happens after the VMA maple tree has been updated.
 */
static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	mm = current->mm;
	mm->map_count -= vms->vma_count;
	mm->locked_vm -= vms->locked_vm;
	if (vms->unlock)
		mmap_write_downgrade(mm);

	if (!vms->nr_pages)
		return;

	vms_clear_ptes(vms, mas_detach, !vms->unlock);
	/* Update high watermark before we lower total_vm */
	update_hiwater_vm(mm);
	/* Stat accounting */
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
	/* Paranoid bookkeeping */
	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
	VM_WARN_ON(vms->data_vm > mm->data_vm);
	mm->exec_vm -= vms->exec_vm;
	mm->stack_vm -= vms->stack_vm;
	mm->data_vm -= vms->data_vm;

	/* Remove and clean up vmas */
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		remove_vma(vma);

	vm_unacct_memory(vms->nr_accounted);
	validate_mm(mm);
	if (vms->unlock)
		mmap_read_unlock(mm);

	__mt_destroy(mas_detach->tree);
}

/*
 * reattach_vmas() - Undo any munmap work and free resources
 * @mas_detach: The maple state with the detached maple tree
 *
 * Reattach any detached vmas and free up the maple tree used to track the vmas.
 */
static void reattach_vmas(struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		vma_mark_attached(vma);

	__mt_destroy(mas_detach->tree);
}
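
/*
 * Two-phase munmap (descriptive sketch): vms_gather_munmap_vmas() below
 * isolates the affected VMAs into mas_detach without touching the mm's tree;
 * only after the main tree is updated does vms_complete_munmap_vmas() tear
 * down PTEs and free the VMAs. Any gather failure is undone with
 * reattach_vmas().
 */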

/*
 * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
 * for removal at a later date. Handles splitting first and last if necessary
 * and marking the vmas as isolated.
 *
 * @vms: The vma munmap struct
 * @mas_detach: The maple state tracking the detached tree
 *
 * Return: 0 on success, error otherwise
 */
static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *next = NULL;
	int error;

	/*
	 * If we need to split any vma, do it now to save pain later.
	 * Does it split the first one?
	 */
	if (vms->start > vms->vma->vm_start) {
		/*
		 * Make sure that map_count on return from munmap() will
		 * not exceed its limit; but let map_count go just above
		 * its limit temporarily, to help free resources as expected.
		 */
		if (vms->end < vms->vma->vm_end &&
		    vms->vma->vm_mm->map_count >= sysctl_max_map_count) {
			error = -ENOMEM;
			goto map_count_exceeded;
		}

		/* Don't bother splitting the VMA if we can't unmap it anyway */
		if (vma_is_sealed(vms->vma)) {
			error = -EPERM;
			goto start_split_failed;
		}

		error = __split_vma(vms->vmi, vms->vma, vms->start, 1);
		if (error)
			goto start_split_failed;
	}
	vms->prev = vma_prev(vms->vmi);
	if (vms->prev)
		vms->unmap_start = vms->prev->vm_end;

	/*
	 * Detach a range of VMAs from the mm. Using next as a temp variable as
	 * it is always overwritten.
	 */
	for_each_vma_range(*(vms->vmi), next, vms->end) {
		long nrpages;

		if (vma_is_sealed(next)) {
			error = -EPERM;
			goto modify_vma_failed;
		}
		/* Does it split the end? */
		if (next->vm_end > vms->end) {
			error = __split_vma(vms->vmi, next, vms->end, 0);
			if (error)
				goto end_split_failed;
		}
		vma_start_write(next);
		mas_set(mas_detach, vms->vma_count++);
		error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
		if (error)
			goto munmap_gather_failed;

		vma_mark_detached(next);
		nrpages = vma_pages(next);

		vms->nr_pages += nrpages;
		if (next->vm_flags & VM_LOCKED)
			vms->locked_vm += nrpages;

		if (next->vm_flags & VM_ACCOUNT)
			vms->nr_accounted += nrpages;

		if (is_exec_mapping(next->vm_flags))
			vms->exec_vm += nrpages;
		else if (is_stack_mapping(next->vm_flags))
			vms->stack_vm += nrpages;
		else if (is_data_mapping(next->vm_flags))
			vms->data_vm += nrpages;

		if (vms->uf) {
			/*
			 * If userfaultfd_unmap_prep returns an error the vmas
			 * will remain split, but userland will get a
			 * highly unexpected error anyway. This is no
			 * different than the case where the first of the two
			 * __split_vma fails, but we don't undo the first
			 * split, even though we could. This failure is
			 * unlikely enough that it's not worth optimizing for.
			 */
			error = userfaultfd_unmap_prep(next, vms->start,
						       vms->end, vms->uf);
			if (error)
				goto userfaultfd_error;
		}
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
		BUG_ON(next->vm_start < vms->start);
		BUG_ON(next->vm_start > vms->end);
#endif
	}

	vms->next = vma_next(vms->vmi);
	if (vms->next)
		vms->unmap_end = vms->next->vm_start;

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	/* Make sure no VMAs are about to be lost. */
	{
		MA_STATE(test, mas_detach->tree, 0, 0);
		struct vm_area_struct *vma_mas, *vma_test;
		int test_count = 0;

		vma_iter_set(vms->vmi, vms->start);
		rcu_read_lock();
		vma_test = mas_find(&test, vms->vma_count - 1);
		for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
			BUG_ON(vma_mas != vma_test);
			test_count++;
			vma_test = mas_next(&test, vms->vma_count - 1);
		}
		rcu_read_unlock();
		BUG_ON(vms->vma_count != test_count);
	}
#endif

	while (vma_iter_addr(vms->vmi) > vms->start)
		vma_iter_prev_range(vms->vmi);

	vms->clear_ptes = true;
	return 0;

userfaultfd_error:
munmap_gather_failed:
end_split_failed:
modify_vma_failed:
	reattach_vmas(mas_detach);
start_split_failed:
map_count_exceeded:
	return error;
}

/*
 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
 * @vms: The vma munmap struct
 * @vmi: The vma iterator
 * @vma: The first vm_area_struct to munmap
 * @start: The aligned start address to munmap
 * @end: The aligned end address to munmap
 * @uf: The userfaultfd list_head
 * @unlock: Unlock after the operation. Only unlocked on success
 */
static void init_vma_munmap(struct vma_munmap_struct *vms,
		struct vma_iterator *vmi, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf,
		bool unlock)
{
	vms->vmi = vmi;
	vms->vma = vma;
	if (vma) {
		vms->start = start;
		vms->end = end;
	} else {
		vms->start = vms->end = 0;
	}
	vms->unlock = unlock;
	vms->uf = uf;
	vms->vma_count = 0;
	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
	vms->unmap_start = FIRST_USER_ADDRESS;
	vms->unmap_end = USER_PGTABLES_CEILING;
	vms->clear_ptes = false;
}

/*
 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
 * @vmi: The vma iterator
 * @vma: The starting vm_area_struct
 * @mm: The mm_struct
 * @start: The aligned start address to munmap.
 * @end: The aligned end address to munmap.
 * @uf: The userfaultfd list_head
 * @unlock: Set to true to drop the mmap_lock. Unlocking only happens on
 *          success.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		struct mm_struct *mm, unsigned long start, unsigned long end,
		struct list_head *uf, bool unlock)
{
	struct maple_tree mt_detach;
	MA_STATE(mas_detach, &mt_detach, 0, 0);
	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
	mt_on_stack(mt_detach);
	struct vma_munmap_struct vms;
	int error;

	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
	error = vms_gather_munmap_vmas(&vms, &mas_detach);
	if (error)
		goto gather_failed;

	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
	if (error)
		goto clear_tree_failed;

	/* Point of no return */
	vms_complete_munmap_vmas(&vms, &mas_detach);
	return 0;

clear_tree_failed:
	reattach_vmas(&mas_detach);
gather_failed:
	validate_mm(mm);
	return error;
}
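
/*
 * Example (sketch): unmapping [start, end) that lies strictly inside one VMA
 * first splits at start and at end via __split_vma(), gathers the now
 * fully-covered middle VMA into mas_detach, clears the range in the mm tree,
 * and then completes the teardown as described above.
 */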

/*
 * do_vmi_munmap() - munmap a given range.
 * @vmi: The vma iterator
 * @mm: The mm_struct
 * @start: The start address to munmap
 * @len: The length of the range to munmap
 * @uf: The userfaultfd list_head
 * @unlock: set to true if the user wants to drop the mmap_lock on success
 *
 * This function takes a @vmi that is either pointing to the previous VMA or
 * set to MA_START and sets it up to remove the mapping(s). The @len will be
 * aligned.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock)
{
	unsigned long end;
	struct vm_area_struct *vma;

	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE - start)
		return -EINVAL;

	end = start + PAGE_ALIGN(len);
	if (end == start)
		return -EINVAL;

	/* Find the first overlapping VMA */
	vma = vma_find(vmi, end);
	if (!vma) {
		if (unlock)
			mmap_write_unlock(mm);
		return 0;
	}

	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
}
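
/*
 * Usage sketch (hypothetical caller, mirroring the usual pattern):
 *
 *	VMA_ITERATOR(vmi, mm, start);
 *	LIST_HEAD(uf);
 *
 *	if (mmap_write_lock_killable(mm))
 *		return -EINTR;
 *	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, true);
 *
 * The lock is dropped on success here because unlock == true.
 */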

/*
 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
 * context and anonymous VMA name within the range [start, end).
 *
 * As a result, we might be able to merge the newly modified VMA range with an
 * adjacent VMA with identical properties.
 *
 * If no merge is possible and the range does not span the entirety of the VMA,
 * we then need to split the VMA to accommodate the change.
 *
 * The function returns either the merged VMA, the original VMA if a split was
 * required instead, or an error if the split failed.
 */
static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *vma = vmg->middle;
	unsigned long start = vmg->start;
	unsigned long end = vmg->end;
	struct vm_area_struct *merged;

	/* First, try to merge. */
	merged = vma_merge_existing_range(vmg);
	if (merged)
		return merged;
	if (vmg_nomem(vmg))
		return ERR_PTR(-ENOMEM);

	/*
	 * Split can fail for reasons other than OOM, so if the user requests
	 * this it's probably a mistake.
	 */
	VM_WARN_ON(vmg->give_up_on_oom &&
		   (vma->vm_start != start || vma->vm_end != end));

	/* Split any preceding portion of the VMA. */
	if (vma->vm_start < start) {
		int err = split_vma(vmg->vmi, vma, start, 1);

		if (err)
			return ERR_PTR(err);
	}

	/* Split any trailing portion of the VMA. */
	if (vma->vm_end > end) {
		int err = split_vma(vmg->vmi, vma, end, 0);

		if (err)
			return ERR_PTR(err);
	}

	return vma;
}

struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		vm_flags_t *vm_flags_ptr)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
	const vm_flags_t vm_flags = *vm_flags_ptr;
	struct vm_area_struct *ret;

	vmg.vm_flags = vm_flags;

	ret = vma_modify(&vmg);
	if (IS_ERR(ret))
		return ret;

	/*
	 * For a merge to succeed, the flags must match those requested.
	 * However, sticky flags may have been retained, so propagate them to
	 * the caller.
	 */
	if (vmg.state == VMA_MERGE_SUCCESS)
		*vm_flags_ptr = ret->vm_flags;
	return ret;
}

struct vm_area_struct *vma_modify_name(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct anon_vma_name *new_name)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.anon_name = new_name;

	return vma_modify(&vmg);
}

struct vm_area_struct *vma_modify_policy(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct mempolicy *new_pol)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.policy = new_pol;

	return vma_modify(&vmg);
}

struct vm_area_struct *vma_modify_flags_uffd(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, vm_flags_t vm_flags,
		struct vm_userfaultfd_ctx new_ctx, bool give_up_on_oom)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.vm_flags = vm_flags;
	vmg.uffd_ctx = new_ctx;
	if (give_up_on_oom)
		vmg.give_up_on_oom = true;

	return vma_modify(&vmg);
}

/*
 * Expand vma by delta bytes, potentially merging with an immediately adjacent
 * VMA with identical properties.
 */
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta)
{
	VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);

	vmg.next = vma_iter_next_rewind(vmi, NULL);
	vmg.middle = NULL; /* We use the VMA to populate VMG fields only. */

	return vma_merge_new_range(&vmg);
}
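
/*
 * Example (sketch): a caller growing a mapping in place, such as mremap(),
 * may call vma_merge_extend(&vmi, vma, delta) to extend @vma over the
 * following gap, merging with an identical adjacent VMA where possible.
 */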

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
{
	vb->count = 0;
}

static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
{
	struct address_space *mapping;
	int i;

	mapping = vb->vmas[0]->vm_file->f_mapping;
	i_mmap_lock_write(mapping);
	for (i = 0; i < vb->count; i++) {
		VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
		__remove_shared_vm_struct(vb->vmas[i], mapping);
	}
	i_mmap_unlock_write(mapping);

	unlink_file_vma_batch_init(vb);
}

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma)
{
	if (vma->vm_file == NULL)
		return;

	if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
	    vb->count == ARRAY_SIZE(vb->vmas))
		unlink_file_vma_batch_process(vb);

	vb->vmas[vb->count] = vma;
	vb->count++;
}

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
{
	if (vb->count > 0)
		unlink_file_vma_batch_process(vb);
}
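
/*
 * Usage sketch (illustrative): callers batch rmap removal as follows,
 * letting the helper flush whenever the file changes or the array fills:
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for each vma being torn down:
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 */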

static void vma_link_file(struct vm_area_struct *vma, bool hold_rmap_lock)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping;

	if (file) {
		mapping = file->f_mapping;
		i_mmap_lock_write(mapping);
		__vma_link_file(vma, mapping);
		if (!hold_rmap_lock)
			i_mmap_unlock_write(mapping);
	}
}

static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
{
	VMA_ITERATOR(vmi, mm, 0);

	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;

	vma_start_write(vma);
	vma_iter_store_new(&vmi, vma);
	vma_link_file(vma, /* hold_rmap_lock= */false);
	mm->map_count++;
	validate_mm(mm);
	return 0;
}

/*
 * Copy the vma structure to a new location in the same mm,
 * prior to moving page table entries, to effect an mremap move.
 */
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks)
{
	struct vm_area_struct *vma = *vmap;
	unsigned long vma_start = vma->vm_start;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	bool faulted_in_anon_vma = true;
	VMA_ITERATOR(vmi, mm, addr);
	VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);

	/*
	 * If anonymous vma has not yet been faulted, update new pgoff
	 * to match new location, to increase its chance of merging.
	 */
	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
		pgoff = addr >> PAGE_SHIFT;
		faulted_in_anon_vma = false;
	}

	/*
	 * If the VMA we are copying might contain a uprobe PTE, ensure
	 * that we do not establish one upon merge. Otherwise, when mremap()
	 * moves page tables, it will orphan the newly created PTE.
	 */
	if (vma->vm_file)
		vmg.skip_vma_uprobe = true;

	new_vma = find_vma_prev(mm, addr, &vmg.prev);
	if (new_vma && new_vma->vm_start < addr + len)
		return NULL;	/* should never get here */

	vmg.pgoff = pgoff;
	vmg.next = vma_iter_next_rewind(&vmi, NULL);
	new_vma = vma_merge_copied_range(&vmg);

	if (new_vma) {
		/*
		 * Source vma may have been merged into new_vma
		 */
		if (unlikely(vma_start >= new_vma->vm_start &&
			     vma_start < new_vma->vm_end)) {
			/*
			 * The only way we can get a vma_merge with
			 * self during an mremap is if the vma hasn't
			 * been faulted in yet and we were allowed to
			 * reset the dst vma->vm_pgoff to the
			 * destination address of the mremap to allow
			 * the merge to happen. mremap must change the
			 * vm_pgoff linearity between src and dst vmas
			 * (in turn preventing a vma_merge) to be
			 * safe. It is only safe to keep the vm_pgoff
			 * linear if there are no pages mapped yet.
			 */
			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
			*vmap = vma = new_vma;
		}
		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
	} else {
		new_vma = vm_area_dup(vma);
		if (!new_vma)
			goto out;
		vma_set_range(new_vma, addr, addr + len, pgoff);
		if (vma_dup_policy(vma, new_vma))
			goto out_free_vma;
		if (anon_vma_clone(new_vma, vma))
			goto out_free_mempol;
		if (new_vma->vm_file)
			get_file(new_vma->vm_file);
		if (new_vma->vm_ops && new_vma->vm_ops->open)
			new_vma->vm_ops->open(new_vma);
		if (vma_link(mm, new_vma))
			goto out_vma_link;
		*need_rmap_locks = false;
	}
	return new_vma;

out_vma_link:
	fixup_hugetlb_reservations(new_vma);
	vma_close(new_vma);

	if (new_vma->vm_file)
		fput(new_vma->vm_file);

	unlink_anon_vmas(new_vma);
out_free_mempol:
	mpol_put(vma_policy(new_vma));
out_free_vma:
	vm_area_free(new_vma);
out:
	return NULL;
}

/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vma's. For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting. But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
	return a->vm_end == b->vm_start &&
	       mpol_equal(vma_policy(a), vma_policy(b)) &&
	       a->vm_file == b->vm_file &&
	       !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_IGNORE_MERGE)) &&
	       b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}
But that's ok: if it 1961 * is being set up, that automatically means that it will be a singleton 1962 * acceptable for merging, so we can do all of this optimistically. But 1963 * we do that READ_ONCE() to make sure that we never re-load the pointer. 1964 * 1965 * IOW: that the "list_is_singular()" test on the anon_vma_chain only 1966 * matters for the 'stable anon_vma' case (ie the thing we want to avoid 1967 * is to return an anon_vma that is "complex" due to having gone through 1968 * a fork). 1969 * 1970 * We also make sure that the two vma's are compatible (adjacent, 1971 * and with the same memory policies). That's all stable, even with just 1972 * a read lock on the mmap_lock. 1973 */ 1974 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, 1975 struct vm_area_struct *a, 1976 struct vm_area_struct *b) 1977 { 1978 if (anon_vma_compatible(a, b)) { 1979 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); 1980 1981 if (anon_vma && list_is_singular(&old->anon_vma_chain)) 1982 return anon_vma; 1983 } 1984 return NULL; 1985 } 1986 1987 /* 1988 * find_mergeable_anon_vma is used by anon_vma_prepare, to check 1989 * neighbouring vmas for a suitable anon_vma, before it goes off 1990 * to allocate a new anon_vma. It checks because a repetitive 1991 * sequence of mprotects and faults may otherwise lead to distinct 1992 * anon_vmas being allocated, preventing vma merge in subsequent 1993 * mprotect. 1994 */ 1995 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) 1996 { 1997 struct anon_vma *anon_vma = NULL; 1998 struct vm_area_struct *prev, *next; 1999 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end); 2000 2001 /* Try next first. */ 2002 next = vma_iter_load(&vmi); 2003 if (next) { 2004 anon_vma = reusable_anon_vma(next, vma, next); 2005 if (anon_vma) 2006 return anon_vma; 2007 } 2008 2009 prev = vma_prev(&vmi); 2010 VM_BUG_ON_VMA(prev != vma, vma); 2011 prev = vma_prev(&vmi); 2012 /* Try prev next. */ 2013 if (prev) 2014 anon_vma = reusable_anon_vma(prev, prev, vma); 2015 2016 /* 2017 * We might reach here with anon_vma == NULL if we can't find 2018 * any reusable anon_vma. 2019 * There's no absolute need to look only at touching neighbours: 2020 * we could search further afield for "compatible" anon_vmas. 2021 * But it would probably just be a waste of time searching, 2022 * or lead to too many vmas hanging off the same anon_vma. 2023 * We're trying to allow mprotect remerging later on, 2024 * not trying to minimize memory used for anon_vmas. 2025 */ 2026 return anon_vma; 2027 } 2028 2029 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops) 2030 { 2031 return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite); 2032 } 2033 2034 static bool vma_is_shared_writable(struct vm_area_struct *vma) 2035 { 2036 return (vma->vm_flags & (VM_WRITE | VM_SHARED)) == 2037 (VM_WRITE | VM_SHARED); 2038 } 2039 2040 static bool vma_fs_can_writeback(struct vm_area_struct *vma) 2041 { 2042 /* No managed pages to writeback. */ 2043 if (vma->vm_flags & VM_PFNMAP) 2044 return false; 2045 2046 return vma->vm_file && vma->vm_file->f_mapping && 2047 mapping_can_writeback(vma->vm_file->f_mapping); 2048 } 2049 2050 /* 2051 * Does this VMA require the underlying folios to have their dirty state 2052 * tracked? 2053 */ 2054 bool vma_needs_dirty_tracking(struct vm_area_struct *vma) 2055 { 2056 /* Only shared, writable VMAs require dirty tracking. */ 2057 if (!vma_is_shared_writable(vma)) 2058 return false; 2059 2060 /* Does the filesystem need to be notified? 
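 *
 * (Illustrative sketch, not taken from this file: a driver opts in to write
 * notification by supplying one of the two handlers checked above, e.g. the
 * hypothetical:
 *
 *	static const struct vm_operations_struct example_ops = {
 *		.page_mkwrite	= example_page_mkwrite,
 *	};
 *
 * vm_ops_needs_writenotify() keys off exactly .page_mkwrite and
 * .pfn_mkwrite, nothing else.)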
*/ 2061 if (vm_ops_needs_writenotify(vma->vm_ops)) 2062 return true; 2063 2064 /* 2065 * Even if the filesystem doesn't indicate a need for writenotify, if it 2066 * can writeback, dirty tracking is still required. 2067 */ 2068 return vma_fs_can_writeback(vma); 2069 } 2070 2071 /* 2072 * Some shared mappings will want the pages marked read-only 2073 * to track write events. If so, we'll downgrade vm_page_prot 2074 * to the private version (using protection_map[] without the 2075 * VM_SHARED bit). 2076 */ 2077 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) 2078 { 2079 /* If it was private or non-writable, the write bit is already clear */ 2080 if (!vma_is_shared_writable(vma)) 2081 return false; 2082 2083 /* The backer wishes to know when pages are first written to? */ 2084 if (vm_ops_needs_writenotify(vma->vm_ops)) 2085 return true; 2086 2087 /* The open routine did something to the protections that pgprot_modify 2088 * won't preserve? */ 2089 if (pgprot_val(vm_page_prot) != 2090 pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags))) 2091 return false; 2092 2093 /* 2094 * Do we need to track softdirty? hugetlb does not support softdirty 2095 * tracking yet. 2096 */ 2097 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma)) 2098 return true; 2099 2100 /* Do we need write faults for uffd-wp tracking? */ 2101 if (userfaultfd_wp(vma)) 2102 return true; 2103 2104 /* Can the mapping track the dirty pages? */ 2105 return vma_fs_can_writeback(vma); 2106 } 2107 2108 static DEFINE_MUTEX(mm_all_locks_mutex); 2109 2110 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) 2111 { 2112 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { 2113 /* 2114 * The LSB of head.next can't change from under us 2115 * because we hold the mm_all_locks_mutex. 2116 */ 2117 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); 2118 /* 2119 * We can safely modify head.next after taking the 2120 * anon_vma->root->rwsem. If some other vma in this mm shares 2121 * the same anon_vma we won't take it again. 2122 * 2123 * No need of atomic instructions here, head.next 2124 * can't change from under us thanks to the 2125 * anon_vma->root->rwsem. 2126 */ 2127 if (__test_and_set_bit(0, (unsigned long *) 2128 &anon_vma->root->rb_root.rb_root.rb_node)) 2129 BUG(); 2130 } 2131 } 2132 2133 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) 2134 { 2135 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { 2136 /* 2137 * AS_MM_ALL_LOCKS can't change from under us because 2138 * we hold the mm_all_locks_mutex. 2139 * 2140 * Operations on ->flags have to be atomic because 2141 * even if AS_MM_ALL_LOCKS is stable thanks to the 2142 * mm_all_locks_mutex, there may be other cpus 2143 * changing other bitflags in parallel to us. 2144 */ 2145 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) 2146 BUG(); 2147 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); 2148 } 2149 } 2150 2151 /* 2152 * This operation locks against the VM for all pte/vma/mm related 2153 * operations that could ever happen on a certain mm. This includes 2154 * vmtruncate, try_to_unmap, and all page faults. 2155 * 2156 * The caller must take the mmap_lock in write mode before calling 2157 * mm_take_all_locks(). The caller isn't allowed to release the 2158 * mmap_lock until mm_drop_all_locks() returns. 
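 *
 * A minimal sketch of the expected calling pattern (illustrative only;
 * mmu-notifier-style users follow roughly this shape):
 *
 *	mmap_write_lock(mm);
 *	err = mm_take_all_locks(mm);	(-EINTR if a signal is pending)
 *	if (!err) {
 *		... operate with every rmap lock in this mm held ...
 *		mm_drop_all_locks(mm);
 *	}
 *	mmap_write_unlock(mm);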
2159 * 2160 * mmap_lock in write mode is required in order to block all operations 2161 * that could modify pagetables and free pages without need of 2162 * altering the vma layout. It's also needed in write mode to prevent new 2163 * anon_vmas from being associated with existing vmas. 2164 * 2165 * A single task can't take more than one mm_take_all_locks() in a row 2166 * or it would deadlock. 2167 * 2168 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in 2169 * mapping->flags prevent taking the same lock twice, if more than one 2170 * vma in this mm is backed by the same anon_vma or address_space. 2171 * 2172 * We take locks in the following order, according to the comment at the 2173 * beginning of mm/rmap.c: 2174 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for 2175 * hugetlb mapping); 2176 * - all vmas marked locked 2177 * - all i_mmap_rwsem locks; 2178 * - all anon_vma->rwsem locks 2179 * 2180 * We can take all locks within these types in any order because the VM code 2181 * doesn't nest them and we are protected from parallel mm_take_all_locks() by 2182 * mm_all_locks_mutex. 2183 * 2184 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations 2185 * that may have to take thousands of locks. 2186 * 2187 * mm_take_all_locks() can fail if it's interrupted by signals. 2188 */ 2189 int mm_take_all_locks(struct mm_struct *mm) 2190 { 2191 struct vm_area_struct *vma; 2192 struct anon_vma_chain *avc; 2193 VMA_ITERATOR(vmi, mm, 0); 2194 2195 mmap_assert_write_locked(mm); 2196 2197 mutex_lock(&mm_all_locks_mutex); 2198 2199 /* 2200 * vma_start_write() does not have a complement in mm_drop_all_locks() 2201 * because vma_start_write() is always asymmetrical; it marks a VMA as 2202 * being written to until mmap_write_unlock() or mmap_write_downgrade() 2203 * is reached. 2204 */ 2205 for_each_vma(vmi, vma) { 2206 if (signal_pending(current)) 2207 goto out_unlock; 2208 vma_start_write(vma); 2209 } 2210 2211 vma_iter_init(&vmi, mm, 0); 2212 for_each_vma(vmi, vma) { 2213 if (signal_pending(current)) 2214 goto out_unlock; 2215 if (vma->vm_file && vma->vm_file->f_mapping && 2216 is_vm_hugetlb_page(vma)) 2217 vm_lock_mapping(mm, vma->vm_file->f_mapping); 2218 } 2219 2220 vma_iter_init(&vmi, mm, 0); 2221 for_each_vma(vmi, vma) { 2222 if (signal_pending(current)) 2223 goto out_unlock; 2224 if (vma->vm_file && vma->vm_file->f_mapping && 2225 !is_vm_hugetlb_page(vma)) 2226 vm_lock_mapping(mm, vma->vm_file->f_mapping); 2227 } 2228 2229 vma_iter_init(&vmi, mm, 0); 2230 for_each_vma(vmi, vma) { 2231 if (signal_pending(current)) 2232 goto out_unlock; 2233 if (vma->anon_vma) 2234 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 2235 vm_lock_anon_vma(mm, avc->anon_vma); 2236 } 2237 2238 return 0; 2239 2240 out_unlock: 2241 mm_drop_all_locks(mm); 2242 return -EINTR; 2243 } 2244 2245 static void vm_unlock_anon_vma(struct anon_vma *anon_vma) 2246 { 2247 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { 2248 /* 2249 * The LSB of head.next can't change to 0 from under 2250 * us because we hold the mm_all_locks_mutex. 2251 * 2252 * We must however clear the bitflag before unlocking 2253 * the vma so that users of the anon_vma->rb_root will 2254 * never see our bitflag. 2255 * 2256 * No need of atomic instructions here, head.next 2257 * can't change from under us until we release the 2258 * anon_vma->root->rwsem.
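 *
 * (Protocol recap, for orientation: bit 0 of the anon_vma root's rb_root
 * pointer doubles as an "already locked by mm_take_all_locks()" marker,
 * so the pairing is
 *
 *	lock:	test_bit -> down_write_nest_lock -> __test_and_set_bit
 *	unlock:	test_bit -> __test_and_clear_bit -> anon_vma_unlock_write
 *
 * all serialised by mm_all_locks_mutex, which is why the non-atomic
 * __test_and_*_bit() variants suffice here.)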
2259 */ 2260 if (!__test_and_clear_bit(0, (unsigned long *) 2261 &anon_vma->root->rb_root.rb_root.rb_node)) 2262 BUG(); 2263 anon_vma_unlock_write(anon_vma); 2264 } 2265 } 2266 2267 static void vm_unlock_mapping(struct address_space *mapping) 2268 { 2269 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { 2270 /* 2271 * AS_MM_ALL_LOCKS can't change to 0 from under us 2272 * because we hold the mm_all_locks_mutex. 2273 */ 2274 i_mmap_unlock_write(mapping); 2275 if (!test_and_clear_bit(AS_MM_ALL_LOCKS, 2276 &mapping->flags)) 2277 BUG(); 2278 } 2279 } 2280 2281 /* 2282 * The mmap_lock cannot be released by the caller until 2283 * mm_drop_all_locks() returns. 2284 */ 2285 void mm_drop_all_locks(struct mm_struct *mm) 2286 { 2287 struct vm_area_struct *vma; 2288 struct anon_vma_chain *avc; 2289 VMA_ITERATOR(vmi, mm, 0); 2290 2291 mmap_assert_write_locked(mm); 2292 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex)); 2293 2294 for_each_vma(vmi, vma) { 2295 if (vma->anon_vma) 2296 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 2297 vm_unlock_anon_vma(avc->anon_vma); 2298 if (vma->vm_file && vma->vm_file->f_mapping) 2299 vm_unlock_mapping(vma->vm_file->f_mapping); 2300 } 2301 2302 mutex_unlock(&mm_all_locks_mutex); 2303 } 2304 2305 /* 2306 * We account for memory if it's a private writable mapping, 2307 * not hugepages, and VM_NORESERVE wasn't set. 2308 */ 2309 static bool accountable_mapping(struct file *file, vm_flags_t vm_flags) 2310 { 2311 /* 2312 * hugetlb has its own accounting separate from the core VM. 2313 * VM_HUGETLB may not be set yet, so we cannot check for that flag. 2314 */ 2315 if (file && is_file_hugepages(file)) 2316 return false; 2317 2318 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; 2319 } 2320 2321 /* 2322 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap() 2323 * operation. 2324 * @vms: The vma unmap structure 2325 * @mas_detach: The maple state with the detached maple tree 2326 * 2327 * Reattach any detached vmas and free up the maple tree used to track the 2328 * vmas. If that's not possible because the ptes are cleared (and vm_ops->close() 2329 * may have been called), then a NULL is written over the vmas and the vmas are 2330 * removed (munmap() completed). 2331 */ 2332 static void vms_abort_munmap_vmas(struct vma_munmap_struct *vms, 2333 struct ma_state *mas_detach) 2334 { 2335 struct ma_state *mas = &vms->vmi->mas; 2336 2337 if (!vms->nr_pages) 2338 return; 2339 2340 if (vms->clear_ptes) 2341 return reattach_vmas(mas_detach); 2342 2343 /* 2344 * Aborting cannot simply call vm_ops->open(), because open() and close() 2345 * are often not symmetrical and state data has been lost. Resort to the 2346 * old failure method of leaving a gap where the MAP_FIXED mapping failed.
2347 */ 2348 mas_set_range(mas, vms->start, vms->end - 1); 2349 mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL); 2350 /* Clean up the insertion of the unfortunate gap */ 2351 vms_complete_munmap_vmas(vms, mas_detach); 2352 } 2353 2354 static void update_ksm_flags(struct mmap_state *map) 2355 { 2356 map->vm_flags = ksm_vma_flags(map->mm, map->file, map->vm_flags); 2357 } 2358 2359 static void set_desc_from_map(struct vm_area_desc *desc, 2360 const struct mmap_state *map) 2361 { 2362 desc->start = map->addr; 2363 desc->end = map->end; 2364 2365 desc->pgoff = map->pgoff; 2366 desc->vm_file = map->file; 2367 desc->vm_flags = map->vm_flags; 2368 desc->page_prot = map->page_prot; 2369 } 2370 2371 /* 2372 * __mmap_setup() - Prepare to gather any overlapping VMAs that need to be 2373 * unmapped once the map operation is completed, check limits, account mapping 2374 * and clean up any pre-existing VMAs. 2375 * 2376 * As a result it sets up the @map and @desc objects. 2377 * 2378 * @map: Mapping state. 2379 * @desc: VMA descriptor 2380 * @uf: Userfaultfd context list. 2381 * 2382 * Returns: 0 on success, error code otherwise. 2383 */ 2384 static int __mmap_setup(struct mmap_state *map, struct vm_area_desc *desc, 2385 struct list_head *uf) 2386 { 2387 int error; 2388 struct vma_iterator *vmi = map->vmi; 2389 struct vma_munmap_struct *vms = &map->vms; 2390 2391 /* Find the first overlapping VMA and initialise unmap state. */ 2392 vms->vma = vma_find(vmi, map->end); 2393 init_vma_munmap(vms, vmi, vms->vma, map->addr, map->end, uf, 2394 /* unlock = */ false); 2395 2396 /* OK, we have overlapping VMAs - prepare to unmap them. */ 2397 if (vms->vma) { 2398 mt_init_flags(&map->mt_detach, 2399 vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK); 2400 mt_on_stack(map->mt_detach); 2401 mas_init(&map->mas_detach, &map->mt_detach, /* addr = */ 0); 2402 /* Prepare to unmap any existing mapping in the area */ 2403 error = vms_gather_munmap_vmas(vms, &map->mas_detach); 2404 if (error) { 2405 /* On error VMAs will already have been reattached. */ 2406 vms->nr_pages = 0; 2407 return error; 2408 } 2409 2410 map->next = vms->next; 2411 map->prev = vms->prev; 2412 } else { 2413 map->next = vma_iter_next_rewind(vmi, &map->prev); 2414 } 2415 2416 /* Check against address space limit. */ 2417 if (!may_expand_vm(map->mm, map->vm_flags, map->pglen - vms->nr_pages)) 2418 return -ENOMEM; 2419 2420 /* Private writable mapping: check memory availability. */ 2421 if (accountable_mapping(map->file, map->vm_flags)) { 2422 map->charged = map->pglen; 2423 map->charged -= vms->nr_accounted; 2424 if (map->charged) { 2425 error = security_vm_enough_memory_mm(map->mm, map->charged); 2426 if (error) 2427 return error; 2428 } 2429 2430 vms->nr_accounted = 0; 2431 map->vm_flags |= VM_ACCOUNT; 2432 } 2433 2434 /* 2435 * Clear PTEs while the vma is still in the tree so that rmap 2436 * cannot race with the freeing later in the truncate scenario. 2437 * This is also needed for mmap_file(), which is why vm_ops 2438 * close function is called. 
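 *
 * (Worked example of the accounting above, with assumed numbers: for a
 * 10-page accountable mapping that replaces 4 already-accounted pages,
 * map->charged = 10 - 4 = 6, so security_vm_enough_memory_mm() is asked
 * for only the 6 genuinely new pages; vms->nr_accounted is then zeroed so
 * the replaced pages are not unaccounted a second time on completion.)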
2439 */ 2440 vms_clean_up_area(vms, &map->mas_detach); 2441 2442 set_desc_from_map(desc, map); 2443 return 0; 2444 } 2445 2446 2447 static int __mmap_new_file_vma(struct mmap_state *map, 2448 struct vm_area_struct *vma) 2449 { 2450 struct vma_iterator *vmi = map->vmi; 2451 int error; 2452 2453 vma->vm_file = get_file(map->file); 2454 2455 if (!map->file->f_op->mmap) 2456 return 0; 2457 2458 error = mmap_file(vma->vm_file, vma); 2459 if (error) { 2460 fput(vma->vm_file); 2461 vma->vm_file = NULL; 2462 2463 vma_iter_set(vmi, vma->vm_end); 2464 /* Undo any partial mapping done by a device driver. */ 2465 unmap_region(&vmi->mas, vma, map->prev, map->next); 2466 2467 return error; 2468 } 2469 2470 /* Drivers cannot alter the address of the VMA. */ 2471 WARN_ON_ONCE(map->addr != vma->vm_start); 2472 /* 2473 * Drivers should not permit writability when previously it was 2474 * disallowed. 2475 */ 2476 VM_WARN_ON_ONCE(map->vm_flags != vma->vm_flags && 2477 !(map->vm_flags & VM_MAYWRITE) && 2478 (vma->vm_flags & VM_MAYWRITE)); 2479 2480 map->file = vma->vm_file; 2481 map->vm_flags = vma->vm_flags; 2482 2483 return 0; 2484 } 2485 2486 /* 2487 * __mmap_new_vma() - Allocate a new VMA for the region, as merging was not 2488 * possible. 2489 * 2490 * @map: Mapping state. 2491 * @vmap: Output pointer for the new VMA. 2492 * 2493 * Returns: Zero on success, or an error. 2494 */ 2495 static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap) 2496 { 2497 struct vma_iterator *vmi = map->vmi; 2498 int error = 0; 2499 struct vm_area_struct *vma; 2500 2501 /* 2502 * Determine the object being mapped and call the appropriate 2503 * specific mapper. the address has already been validated, but 2504 * not unmapped, but the maps are removed from the list. 2505 */ 2506 vma = vm_area_alloc(map->mm); 2507 if (!vma) 2508 return -ENOMEM; 2509 2510 vma_iter_config(vmi, map->addr, map->end); 2511 vma_set_range(vma, map->addr, map->end, map->pgoff); 2512 vm_flags_init(vma, map->vm_flags); 2513 vma->vm_page_prot = map->page_prot; 2514 2515 if (vma_iter_prealloc(vmi, vma)) { 2516 error = -ENOMEM; 2517 goto free_vma; 2518 } 2519 2520 if (map->file) 2521 error = __mmap_new_file_vma(map, vma); 2522 else if (map->vm_flags & VM_SHARED) 2523 error = shmem_zero_setup(vma); 2524 else 2525 vma_set_anonymous(vma); 2526 2527 if (error) 2528 goto free_iter_vma; 2529 2530 if (!map->check_ksm_early) { 2531 update_ksm_flags(map); 2532 vm_flags_init(vma, map->vm_flags); 2533 } 2534 2535 #ifdef CONFIG_SPARC64 2536 /* TODO: Fix SPARC ADI! */ 2537 WARN_ON_ONCE(!arch_validate_flags(map->vm_flags)); 2538 #endif 2539 2540 /* Lock the VMA since it is modified after insertion into VMA tree */ 2541 vma_start_write(vma); 2542 vma_iter_store_new(vmi, vma); 2543 map->mm->map_count++; 2544 vma_link_file(vma, map->hold_file_rmap_lock); 2545 2546 /* 2547 * vma_merge_new_range() calls khugepaged_enter_vma() too, the below 2548 * call covers the non-merge case. 2549 */ 2550 if (!vma_is_anonymous(vma)) 2551 khugepaged_enter_vma(vma, map->vm_flags); 2552 *vmap = vma; 2553 return 0; 2554 2555 free_iter_vma: 2556 vma_iter_free(vmi); 2557 free_vma: 2558 vm_area_free(vma); 2559 return error; 2560 } 2561 2562 /* 2563 * __mmap_complete() - Unmap any VMAs we overlap, account memory mapping 2564 * statistics, handle locking and finalise the VMA. 2565 * 2566 * @map: Mapping state. 2567 * @vma: Merged or newly allocated VMA for the mmap()'d region. 
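 *
 * (For orientation, the __mmap_region() pipeline these helpers form,
 * sketched; steps marked '?' only run for f_op->mmap_prepare() mappings:
 *
 *	__mmap_setup() -> call_mmap_prepare()? -> vma_merge_new_range()
 *		or __mmap_new_vma() -> __mmap_complete()
 *		-> call_action_complete()?
 *
 * as implemented by __mmap_region() further below.)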
2568 */ 2569 static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma) 2570 { 2571 struct mm_struct *mm = map->mm; 2572 vm_flags_t vm_flags = vma->vm_flags; 2573 2574 perf_event_mmap(vma); 2575 2576 /* Unmap any existing mapping in the area. */ 2577 vms_complete_munmap_vmas(&map->vms, &map->mas_detach); 2578 2579 vm_stat_account(mm, vma->vm_flags, map->pglen); 2580 if (vm_flags & VM_LOCKED) { 2581 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || 2582 is_vm_hugetlb_page(vma) || 2583 vma == get_gate_vma(mm)) 2584 vm_flags_clear(vma, VM_LOCKED_MASK); 2585 else 2586 mm->locked_vm += map->pglen; 2587 } 2588 2589 if (vma->vm_file) 2590 uprobe_mmap(vma); 2591 2592 /* 2593 * A new (or expanded) vma always gets soft-dirty status. 2594 * Otherwise the user-space soft-dirty page tracker would not 2595 * be able to distinguish the situation where a vma area is unmapped 2596 * and then a new one is mapped in place (which must be treated as 2597 * a completely new data area). 2598 */ 2599 if (pgtable_supports_soft_dirty()) 2600 vm_flags_set(vma, VM_SOFTDIRTY); 2601 2602 vma_set_page_prot(vma); 2603 } 2604 2605 static void call_action_prepare(struct mmap_state *map, 2606 struct vm_area_desc *desc) 2607 { 2608 struct mmap_action *action = &desc->action; 2609 2610 mmap_action_prepare(action, desc); 2611 2612 if (action->hide_from_rmap_until_complete) 2613 map->hold_file_rmap_lock = true; 2614 } 2615 2616 /* 2617 * Invoke the f_op->mmap_prepare() callback for a file-backed mapping that 2618 * specifies it. 2619 * 2620 * This is called prior to any merge attempt, and updates the whitelisted 2621 * fields that the hook is permitted to change. 2622 * 2623 * All but user-defined fields will be pre-populated with original values. 2624 * 2625 * Returns 0 on success, or an error code otherwise. 2626 */ 2627 static int call_mmap_prepare(struct mmap_state *map, 2628 struct vm_area_desc *desc) 2629 { 2630 int err; 2631 2632 /* Invoke the hook. */ 2633 err = vfs_mmap_prepare(map->file, desc); 2634 if (err) 2635 return err; 2636 2637 call_action_prepare(map, desc); 2638 2639 /* Update fields permitted to be changed. */ 2640 map->pgoff = desc->pgoff; 2641 map->file = desc->vm_file; 2642 map->vm_flags = desc->vm_flags; 2643 map->page_prot = desc->page_prot; 2644 /* User-defined fields. */ 2645 map->vm_ops = desc->vm_ops; 2646 map->vm_private_data = desc->private_data; 2647 2648 return 0; 2649 } 2650 2651 static void set_vma_user_defined_fields(struct vm_area_struct *vma, 2652 struct mmap_state *map) 2653 { 2654 if (map->vm_ops) 2655 vma->vm_ops = map->vm_ops; 2656 vma->vm_private_data = map->vm_private_data; 2657 } 2658 2659 /* 2660 * Are we guaranteed that no driver can change state in a way that precludes 2661 * KSM merging? If so, set the KSM mergeable flag early so we don't break VMA merging. 2662 */ 2663 static bool can_set_ksm_flags_early(struct mmap_state *map) 2664 { 2665 struct file *file = map->file; 2666 2667 /* Anonymous mappings have no driver that can change them. */ 2668 if (!file) 2669 return true; 2670 2671 /* 2672 * If .mmap_prepare() is specified, then the driver will have already 2673 * manipulated state prior to updating KSM flags, so there is no need to 2674 * worry about mmap callbacks modifying VMA flags after the KSM flag has 2675 * been updated here, which could otherwise affect KSM eligibility. 2676 */ 2677 if (file->f_op->mmap_prepare) 2678 return true; 2679 2680 /* shmem is safe. */ 2681 if (shmem_file(file)) 2682 return true; 2683 2684 /* Any other .mmap callback is not safe.
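 *
 * (By contrast, an illustrative, hypothetical driver shape for the safe
 * .mmap_prepare() case above; every name here is invented:
 *
 *	static int example_mmap_prepare(struct vm_area_desc *desc)
 *	{
 *		desc->vm_ops = &example_vm_ops;
 *		desc->private_data = example_state;
 *		return 0;
 *	}
 *
 *	static const struct file_operations example_fops = {
 *		.mmap_prepare	= example_mmap_prepare,
 *	};
 *
 * All flag and ops changes happen in the hook, before the KSM flags are
 * computed, which is what makes that path safe.)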
*/ 2685 return false; 2686 } 2687 2688 static int call_action_complete(struct mmap_state *map, 2689 struct vm_area_desc *desc, 2690 struct vm_area_struct *vma) 2691 { 2692 struct mmap_action *action = &desc->action; 2693 int ret; 2694 2695 ret = mmap_action_complete(action, vma); 2696 2697 /* If we held the file rmap we need to release it. */ 2698 if (map->hold_file_rmap_lock) { 2699 struct file *file = vma->vm_file; 2700 2701 i_mmap_unlock_write(file->f_mapping); 2702 } 2703 return ret; 2704 } 2705 2706 static unsigned long __mmap_region(struct file *file, unsigned long addr, 2707 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, 2708 struct list_head *uf) 2709 { 2710 struct mm_struct *mm = current->mm; 2711 struct vm_area_struct *vma = NULL; 2712 bool have_mmap_prepare = file && file->f_op->mmap_prepare; 2713 VMA_ITERATOR(vmi, mm, addr); 2714 MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file); 2715 struct vm_area_desc desc = { 2716 .mm = mm, 2717 .file = file, 2718 .action = { 2719 .type = MMAP_NOTHING, /* Default to no further action. */ 2720 }, 2721 }; 2722 bool allocated_new = false; 2723 int error; 2724 2725 map.check_ksm_early = can_set_ksm_flags_early(&map); 2726 2727 error = __mmap_setup(&map, &desc, uf); 2728 if (!error && have_mmap_prepare) 2729 error = call_mmap_prepare(&map, &desc); 2730 if (error) 2731 goto abort_munmap; 2732 2733 if (map.check_ksm_early) 2734 update_ksm_flags(&map); 2735 2736 /* Attempt to merge with adjacent VMAs... */ 2737 if (map.prev || map.next) { 2738 VMG_MMAP_STATE(vmg, &map, /* vma = */ NULL); 2739 2740 vma = vma_merge_new_range(&vmg); 2741 } 2742 2743 /* ...but if we can't, allocate a new VMA. */ 2744 if (!vma) { 2745 error = __mmap_new_vma(&map, &vma); 2746 if (error) 2747 goto unacct_error; 2748 allocated_new = true; 2749 } 2750 2751 if (have_mmap_prepare) 2752 set_vma_user_defined_fields(vma, &map); 2753 2754 __mmap_complete(&map, vma); 2755 2756 if (have_mmap_prepare && allocated_new) { 2757 error = call_action_complete(&map, &desc, vma); 2758 2759 if (error) 2760 return error; 2761 } 2762 2763 return addr; 2764 2765 /* Accounting was done by __mmap_setup(). */ 2766 unacct_error: 2767 if (map.charged) 2768 vm_unacct_memory(map.charged); 2769 abort_munmap: 2770 vms_abort_munmap_vmas(&map.vms, &map.mas_detach); 2771 return error; 2772 } 2773 2774 /** 2775 * mmap_region() - Actually perform the userland mapping of a VMA into 2776 * current->mm with known, aligned and overflow-checked @addr and @len, and 2777 * correctly determined VMA flags @vm_flags and page offset @pgoff. 2778 * 2779 * This is an internal memory management function, and should not be used 2780 * directly. 2781 * 2782 * The caller must write-lock current->mm->mmap_lock. 2783 * 2784 * @file: If a file-backed mapping, a pointer to the struct file describing the 2785 * file to be mapped, otherwise NULL. 2786 * @addr: The page-aligned address at which to perform the mapping. 2787 * @len: The page-aligned, non-zero, length of the mapping. 2788 * @vm_flags: The VMA flags which should be applied to the mapping. 2789 * @pgoff: If @file is specified, the page offset into the file, if not then 2790 * the virtual page offset in memory of the anonymous mapping. 2791 * @uf: Optionally, a pointer to a list head used for tracking userfaultfd unmap 2792 * events. 2793 * 2794 * Returns: Either an error, or the address at which the requested mapping has 2795 * been performed. 
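 *
 * A hedged usage sketch (illustrative; real callers reach this function via
 * do_mmap(), which computes @vm_flags and @pgoff first):
 *
 *	LIST_HEAD(uf);
 *	unsigned long ret;
 *
 *	mmap_write_lock(current->mm);
 *	ret = mmap_region(file, addr, len, vm_flags, pgoff, &uf);
 *	mmap_write_unlock(current->mm);
 *	userfaultfd_unmap_complete(current->mm, &uf);
 *	if (IS_ERR_VALUE(ret))
 *		...ret holds a negative errno...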
2796 */ 2797 unsigned long mmap_region(struct file *file, unsigned long addr, 2798 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, 2799 struct list_head *uf) 2800 { 2801 unsigned long ret; 2802 bool writable_file_mapping = false; 2803 2804 mmap_assert_write_locked(current->mm); 2805 2806 /* Check to see if MDWE is applicable. */ 2807 if (map_deny_write_exec(vm_flags, vm_flags)) 2808 return -EACCES; 2809 2810 /* Allow architectures to sanity-check the vm_flags. */ 2811 if (!arch_validate_flags(vm_flags)) 2812 return -EINVAL; 2813 2814 /* Map writable and ensure this isn't a sealed memfd. */ 2815 if (file && is_shared_maywrite(vm_flags)) { 2816 int error = mapping_map_writable(file->f_mapping); 2817 2818 if (error) 2819 return error; 2820 writable_file_mapping = true; 2821 } 2822 2823 ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf); 2824 2825 /* Clear our write mapping regardless of error. */ 2826 if (writable_file_mapping) 2827 mapping_unmap_writable(file->f_mapping); 2828 2829 validate_mm(current->mm); 2830 return ret; 2831 } 2832 2833 /* 2834 * do_brk_flags() - Increase the brk vma if the flags match. 2835 * @vmi: The vma iterator 2836 * @addr: The start address 2837 * @len: The length of the increase 2838 * @vma: The vma 2839 * @vm_flags: The VMA flags 2840 * 2841 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags 2842 * do not match, then create a new anonymous VMA. Eventually we may be able to 2843 * do some brk-specific accounting here. 2844 */ 2845 int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, 2846 unsigned long addr, unsigned long len, vm_flags_t vm_flags) 2847 { 2848 struct mm_struct *mm = current->mm; 2849 2850 /* 2851 * Check against address space limits by the changed size. 2852 * Note: This happens *after* clearing old mappings in some code paths. 2853 */ 2854 vm_flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; 2855 vm_flags = ksm_vma_flags(mm, NULL, vm_flags); 2856 if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) 2857 return -ENOMEM; 2858 2859 if (mm->map_count > sysctl_max_map_count) 2860 return -ENOMEM; 2861 2862 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) 2863 return -ENOMEM; 2864 2865 /* 2866 * Expand the existing vma if possible. Note that singular lists do not 2867 * occur after forking, so the expand will only happen on new VMAs. 2868 */ 2869 if (vma && vma->vm_end == addr) { 2870 VMG_STATE(vmg, mm, vmi, addr, addr + len, vm_flags, PHYS_PFN(addr)); 2871 2872 vmg.prev = vma; 2873 /* vmi is positioned at prev, which this mode expects.
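 *
 * (Caller contract restated with a hypothetical sketch; the helper naming
 * is assumed, not quoted from the brk() path:
 *
 *	vma = find_the_vma_preceding(vmi, addr);	(positions vmi on prev)
 *	error = do_brk_flags(vmi, vma, addr, len, 0);
 *
 * just_expand then loosely restricts vma_merge_new_range() to the
 * expand-prev case rather than considering the other merge candidates.)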
*/ 2874 vmg.just_expand = true; 2875 2876 if (vma_merge_new_range(&vmg)) 2877 goto out; 2878 else if (vmg_nomem(&vmg)) 2879 goto unacct_fail; 2880 } 2881 2882 if (vma) 2883 vma_iter_next_range(vmi); 2884 /* create a vma struct for an anonymous mapping */ 2885 vma = vm_area_alloc(mm); 2886 if (!vma) 2887 goto unacct_fail; 2888 2889 vma_set_anonymous(vma); 2890 vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT); 2891 vm_flags_init(vma, vm_flags); 2892 vma->vm_page_prot = vm_get_page_prot(vm_flags); 2893 vma_start_write(vma); 2894 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) 2895 goto mas_store_fail; 2896 2897 mm->map_count++; 2898 validate_mm(mm); 2899 out: 2900 perf_event_mmap(vma); 2901 mm->total_vm += len >> PAGE_SHIFT; 2902 mm->data_vm += len >> PAGE_SHIFT; 2903 if (vm_flags & VM_LOCKED) 2904 mm->locked_vm += (len >> PAGE_SHIFT); 2905 if (pgtable_supports_soft_dirty()) 2906 vm_flags_set(vma, VM_SOFTDIRTY); 2907 return 0; 2908 2909 mas_store_fail: 2910 vm_area_free(vma); 2911 unacct_fail: 2912 vm_unacct_memory(len >> PAGE_SHIFT); 2913 return -ENOMEM; 2914 } 2915 2916 /** 2917 * unmapped_area() - Find an area between the low_limit and the high_limit with 2918 * the correct alignment and offset, all from @info. Note: current->mm is used 2919 * for the search. 2920 * 2921 * @info: The unmapped area information including the range [low_limit, 2922 * high_limit), the alignment offset and mask. 2923 * 2924 * Return: A memory address or -ENOMEM. 2925 */ 2926 unsigned long unmapped_area(struct vm_unmapped_area_info *info) 2927 { 2928 unsigned long length, gap; 2929 unsigned long low_limit, high_limit; 2930 struct vm_area_struct *tmp; 2931 VMA_ITERATOR(vmi, current->mm, 0); 2932 2933 /* Adjust search length to account for worst case alignment overhead */ 2934 length = info->length + info->align_mask + info->start_gap; 2935 if (length < info->length) 2936 return -ENOMEM; 2937 2938 low_limit = info->low_limit; 2939 if (low_limit < mmap_min_addr) 2940 low_limit = mmap_min_addr; 2941 high_limit = info->high_limit; 2942 retry: 2943 if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length)) 2944 return -ENOMEM; 2945 2946 /* 2947 * Adjust for the gap first so it doesn't interfere with the 2948 * later alignment. The first step is the minimum needed to 2949 * fulfill the start gap, the next step is the minimum to align 2950 * that. Together they are the minimum needed to fulfill both. 2951 */ 2952 gap = vma_iter_addr(&vmi) + info->start_gap; 2953 gap += (info->align_offset - gap) & info->align_mask; 2954 tmp = vma_next(&vmi); 2955 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ 2956 if (vm_start_gap(tmp) < gap + length - 1) { 2957 low_limit = tmp->vm_end; 2958 vma_iter_reset(&vmi); 2959 goto retry; 2960 } 2961 } else { 2962 tmp = vma_prev(&vmi); 2963 if (tmp && vm_end_gap(tmp) > gap) { 2964 low_limit = vm_end_gap(tmp); 2965 vma_iter_reset(&vmi); 2966 goto retry; 2967 } 2968 } 2969 2970 return gap; 2971 } 2972 2973 /** 2974 * unmapped_area_topdown() - Find an area between the low_limit and the 2975 * high_limit with the correct alignment and offset at the highest available 2976 * address, all from @info. Note: current->mm is used for the search. 2977 * 2978 * @info: The unmapped area information including the range [low_limit, 2979 * high_limit), the alignment offset and mask. 2980 * 2981 * Return: A memory address or -ENOMEM.
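 *
 * (Illustrative @info setup with assumed values, as an arch
 * get_unmapped_area() might build for a 2MiB-aligned topdown search;
 * vm_unmapped_area() dispatches here when VM_UNMAPPED_AREA_TOPDOWN is set:
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 *	info.length = SZ_2M;
 *	info.low_limit = PAGE_SIZE;
 *	info.high_limit = mm->mmap_base;
 *	info.align_mask = SZ_2M - 1;
 *	addr = vm_unmapped_area(&info);
 *
 * The alignment step below, gap -= (gap - align_offset) & align_mask,
 * rounds the highest fitting end-of-gap address down until
 * (addr & align_mask) == align_offset.)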
2982 */ 2983 unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) 2984 { 2985 unsigned long length, gap, gap_end; 2986 unsigned long low_limit, high_limit; 2987 struct vm_area_struct *tmp; 2988 VMA_ITERATOR(vmi, current->mm, 0); 2989 2990 /* Adjust search length to account for worst case alignment overhead */ 2991 length = info->length + info->align_mask + info->start_gap; 2992 if (length < info->length) 2993 return -ENOMEM; 2994 2995 low_limit = info->low_limit; 2996 if (low_limit < mmap_min_addr) 2997 low_limit = mmap_min_addr; 2998 high_limit = info->high_limit; 2999 retry: 3000 if (vma_iter_area_highest(&vmi, low_limit, high_limit, length)) 3001 return -ENOMEM; 3002 3003 gap = vma_iter_end(&vmi) - info->length; 3004 gap -= (gap - info->align_offset) & info->align_mask; 3005 gap_end = vma_iter_end(&vmi); 3006 tmp = vma_next(&vmi); 3007 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ 3008 if (vm_start_gap(tmp) < gap_end) { 3009 high_limit = vm_start_gap(tmp); 3010 vma_iter_reset(&vmi); 3011 goto retry; 3012 } 3013 } else { 3014 tmp = vma_prev(&vmi); 3015 if (tmp && vm_end_gap(tmp) > gap) { 3016 high_limit = tmp->vm_start; 3017 vma_iter_reset(&vmi); 3018 goto retry; 3019 } 3020 } 3021 3022 return gap; 3023 } 3024 3025 /* 3026 * Verify that the stack growth is acceptable and 3027 * update accounting. This is shared with both the 3028 * grow-up and grow-down cases. 3029 */ 3030 static int acct_stack_growth(struct vm_area_struct *vma, 3031 unsigned long size, unsigned long grow) 3032 { 3033 struct mm_struct *mm = vma->vm_mm; 3034 unsigned long new_start; 3035 3036 /* address space limit tests */ 3037 if (!may_expand_vm(mm, vma->vm_flags, grow)) 3038 return -ENOMEM; 3039 3040 /* Stack limit test */ 3041 if (size > rlimit(RLIMIT_STACK)) 3042 return -ENOMEM; 3043 3044 /* mlock limit tests */ 3045 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT)) 3046 return -ENOMEM; 3047 3048 /* Check to ensure the stack will not grow into a hugetlb-only region */ 3049 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : 3050 vma->vm_end - size; 3051 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) 3052 return -EFAULT; 3053 3054 /* 3055 * Overcommit.. This must be the final test, as it will 3056 * update security statistics. 3057 */ 3058 if (security_vm_enough_memory_mm(mm, grow)) 3059 return -ENOMEM; 3060 3061 return 0; 3062 } 3063 3064 #if defined(CONFIG_STACK_GROWSUP) 3065 /* 3066 * PA-RISC uses this for its stack. 3067 * vma is the last one with address > vma->vm_end. Have to extend vma. 3068 */ 3069 int expand_upwards(struct vm_area_struct *vma, unsigned long address) 3070 { 3071 struct mm_struct *mm = vma->vm_mm; 3072 struct vm_area_struct *next; 3073 unsigned long gap_addr; 3074 int error = 0; 3075 VMA_ITERATOR(vmi, mm, vma->vm_start); 3076 3077 if (!(vma->vm_flags & VM_GROWSUP)) 3078 return -EFAULT; 3079 3080 mmap_assert_write_locked(mm); 3081 3082 /* Guard against exceeding limits of the address space. 
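 *
 * (Worked example, assuming 4KiB pages and the default stack_guard_gap of
 * 256 pages: growing the stack up to address A additionally requires the
 * 1MiB at [A, A + stack_guard_gap) to be clear of any following accessible
 * VMA, with gap_addr clamped to TASK_SIZE on overflow as below.)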
*/ 3083 address &= PAGE_MASK; 3084 if (address >= (TASK_SIZE & PAGE_MASK)) 3085 return -ENOMEM; 3086 address += PAGE_SIZE; 3087 3088 /* Enforce stack_guard_gap */ 3089 gap_addr = address + stack_guard_gap; 3090 3091 /* Guard against overflow */ 3092 if (gap_addr < address || gap_addr > TASK_SIZE) 3093 gap_addr = TASK_SIZE; 3094 3095 next = find_vma_intersection(mm, vma->vm_end, gap_addr); 3096 if (next && vma_is_accessible(next)) { 3097 if (!(next->vm_flags & VM_GROWSUP)) 3098 return -ENOMEM; 3099 /* Check that both stack segments have the same anon_vma? */ 3100 } 3101 3102 if (next) 3103 vma_iter_prev_range_limit(&vmi, address); 3104 3105 vma_iter_config(&vmi, vma->vm_start, address); 3106 if (vma_iter_prealloc(&vmi, vma)) 3107 return -ENOMEM; 3108 3109 /* We must make sure the anon_vma is allocated. */ 3110 if (unlikely(anon_vma_prepare(vma))) { 3111 vma_iter_free(&vmi); 3112 return -ENOMEM; 3113 } 3114 3115 /* Lock the VMA before expanding to prevent concurrent page faults */ 3116 vma_start_write(vma); 3117 /* We update the anon VMA tree. */ 3118 anon_vma_lock_write(vma->anon_vma); 3119 3120 /* Somebody else might have raced and expanded it already */ 3121 if (address > vma->vm_end) { 3122 unsigned long size, grow; 3123 3124 size = address - vma->vm_start; 3125 grow = (address - vma->vm_end) >> PAGE_SHIFT; 3126 3127 error = -ENOMEM; 3128 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { 3129 error = acct_stack_growth(vma, size, grow); 3130 if (!error) { 3131 if (vma->vm_flags & VM_LOCKED) 3132 mm->locked_vm += grow; 3133 vm_stat_account(mm, vma->vm_flags, grow); 3134 anon_vma_interval_tree_pre_update_vma(vma); 3135 vma->vm_end = address; 3136 /* Overwrite old entry in mtree. */ 3137 vma_iter_store_overwrite(&vmi, vma); 3138 anon_vma_interval_tree_post_update_vma(vma); 3139 3140 perf_event_mmap(vma); 3141 } 3142 } 3143 } 3144 anon_vma_unlock_write(vma->anon_vma); 3145 vma_iter_free(&vmi); 3146 validate_mm(mm); 3147 return error; 3148 } 3149 #endif /* CONFIG_STACK_GROWSUP */ 3150 3151 /* 3152 * vma is the first one with address < vma->vm_start. Have to extend vma. 3153 * mmap_lock held for writing. 3154 */ 3155 int expand_downwards(struct vm_area_struct *vma, unsigned long address) 3156 { 3157 struct mm_struct *mm = vma->vm_mm; 3158 struct vm_area_struct *prev; 3159 int error = 0; 3160 VMA_ITERATOR(vmi, mm, vma->vm_start); 3161 3162 if (!(vma->vm_flags & VM_GROWSDOWN)) 3163 return -EFAULT; 3164 3165 mmap_assert_write_locked(mm); 3166 3167 address &= PAGE_MASK; 3168 if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) 3169 return -EPERM; 3170 3171 /* Enforce stack_guard_gap */ 3172 prev = vma_prev(&vmi); 3173 /* Check that both stack segments have the same anon_vma? */ 3174 if (prev) { 3175 if (!(prev->vm_flags & VM_GROWSDOWN) && 3176 vma_is_accessible(prev) && 3177 (address - prev->vm_end < stack_guard_gap)) 3178 return -ENOMEM; 3179 } 3180 3181 if (prev) 3182 vma_iter_next_range_limit(&vmi, vma->vm_start); 3183 3184 vma_iter_config(&vmi, address, vma->vm_end); 3185 if (vma_iter_prealloc(&vmi, vma)) 3186 return -ENOMEM; 3187 3188 /* We must make sure the anon_vma is allocated. */ 3189 if (unlikely(anon_vma_prepare(vma))) { 3190 vma_iter_free(&vmi); 3191 return -ENOMEM; 3192 } 3193 3194 /* Lock the VMA before expanding to prevent concurrent page faults */ 3195 vma_start_write(vma); 3196 /* We update the anon VMA tree. 
*/ 3197 anon_vma_lock_write(vma->anon_vma); 3198 3199 /* Somebody else might have raced and expanded it already */ 3200 if (address < vma->vm_start) { 3201 unsigned long size, grow; 3202 3203 size = vma->vm_end - address; 3204 grow = (vma->vm_start - address) >> PAGE_SHIFT; 3205 3206 error = -ENOMEM; 3207 if (grow <= vma->vm_pgoff) { 3208 error = acct_stack_growth(vma, size, grow); 3209 if (!error) { 3210 if (vma->vm_flags & VM_LOCKED) 3211 mm->locked_vm += grow; 3212 vm_stat_account(mm, vma->vm_flags, grow); 3213 anon_vma_interval_tree_pre_update_vma(vma); 3214 vma->vm_start = address; 3215 vma->vm_pgoff -= grow; 3216 /* Overwrite old entry in mtree. */ 3217 vma_iter_store_overwrite(&vmi, vma); 3218 anon_vma_interval_tree_post_update_vma(vma); 3219 3220 perf_event_mmap(vma); 3221 } 3222 } 3223 } 3224 anon_vma_unlock_write(vma->anon_vma); 3225 vma_iter_free(&vmi); 3226 validate_mm(mm); 3227 return error; 3228 } 3229 3230 int __vm_munmap(unsigned long start, size_t len, bool unlock) 3231 { 3232 int ret; 3233 struct mm_struct *mm = current->mm; 3234 LIST_HEAD(uf); 3235 VMA_ITERATOR(vmi, mm, start); 3236 3237 if (mmap_write_lock_killable(mm)) 3238 return -EINTR; 3239 3240 ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock); 3241 if (ret || !unlock) 3242 mmap_write_unlock(mm); 3243 3244 userfaultfd_unmap_complete(mm, &uf); 3245 return ret; 3246 } 3247 3248 /* Insert vm structure into the process list sorted by address 3249 * and into the inode's i_mmap tree. If vm_file is non-NULL 3250 * then i_mmap_rwsem is taken here. 3251 */ 3252 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) 3253 { 3254 unsigned long charged = vma_pages(vma); 3255 3256 3257 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) 3258 return -ENOMEM; 3259 3260 if ((vma->vm_flags & VM_ACCOUNT) && 3261 security_vm_enough_memory_mm(mm, charged)) 3262 return -ENOMEM; 3263 3264 /* 3265 * The vm_pgoff of a purely anonymous vma should be irrelevant 3266 * until its first write fault, when the page's anon_vma and index 3267 * are set. But set it now to the vm_pgoff it will almost certainly 3268 * end up with (unless mremap moves it elsewhere before that 3269 * first write fault), so /proc/pid/maps tells a consistent story. 3270 * 3271 * By setting it to reflect the virtual start address of the 3272 * vma, merges and splits can happen in a seamless way, just 3273 * using the existing file pgoff checks and manipulations. 3274 * Similarly in do_mmap and in do_brk_flags. 3275 */ 3276 if (vma_is_anonymous(vma)) { 3277 BUG_ON(vma->anon_vma); 3278 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; 3279 } 3280 3281 if (vma_link(mm, vma)) { 3282 if (vma->vm_flags & VM_ACCOUNT) 3283 vm_unacct_memory(charged); 3284 return -ENOMEM; 3285 } 3286 3287 return 0; 3288 } 3289
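/*
 * Editorial addendum: a minimal, illustrative sketch (not part of the
 * original file) of how a kernel-internal user might drive
 * insert_vm_struct() above, loosely modelled on the special-mapping
 * installation pattern. The function name is hypothetical and error
 * handling is reduced to the essentials.
 */
static __maybe_unused struct vm_area_struct *
example_insert_anon_vma(struct mm_struct *mm, unsigned long addr,
			unsigned long len, vm_flags_t vm_flags)
{
	struct vm_area_struct *vma;

	vma = vm_area_alloc(mm);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	/* Anonymous: insert_vm_struct() will pick the canonical vm_pgoff. */
	vma_set_anonymous(vma);
	vma_set_range(vma, addr, addr + len, 0);
	vm_flags_init(vma, vm_flags | mm->def_flags);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/* Takes i_mmap_rwsem if file-backed; charges VM_ACCOUNT mappings. */
	if (insert_vm_struct(mm, vma)) {
		vm_area_free(vma);
		return ERR_PTR(-ENOMEM);
	}

	return vma;
}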