1 // SPDX-License-Identifier: GPL-2.0-or-later 2 3 /* 4 * VMA-specific functions. 5 */ 6 7 #include "vma_internal.h" 8 #include "vma.h" 9 10 struct mmap_state { 11 struct mm_struct *mm; 12 struct vma_iterator *vmi; 13 14 unsigned long addr; 15 unsigned long end; 16 pgoff_t pgoff; 17 unsigned long pglen; 18 union { 19 vm_flags_t vm_flags; 20 vma_flags_t vma_flags; 21 }; 22 struct file *file; 23 pgprot_t page_prot; 24 25 /* User-defined fields, perhaps updated by .mmap_prepare(). */ 26 const struct vm_operations_struct *vm_ops; 27 void *vm_private_data; 28 29 unsigned long charged; 30 31 struct vm_area_struct *prev; 32 struct vm_area_struct *next; 33 34 /* Unmapping state. */ 35 struct vma_munmap_struct vms; 36 struct ma_state mas_detach; 37 struct maple_tree mt_detach; 38 39 /* Determine if we can check KSM flags early in mmap() logic. */ 40 bool check_ksm_early :1; 41 /* If .mmap_prepare changed the file, we don't need to pin. */ 42 bool file_doesnt_need_get :1; 43 }; 44 45 #define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, vma_flags_, file_) \ 46 struct mmap_state name = { \ 47 .mm = mm_, \ 48 .vmi = vmi_, \ 49 .addr = addr_, \ 50 .end = (addr_) + (len_), \ 51 .pgoff = pgoff_, \ 52 .pglen = PHYS_PFN(len_), \ 53 .vma_flags = vma_flags_, \ 54 .file = file_, \ 55 .page_prot = vma_get_page_prot(vma_flags_), \ 56 } 57 58 #define VMG_MMAP_STATE(name, map_, vma_) \ 59 struct vma_merge_struct name = { \ 60 .mm = (map_)->mm, \ 61 .vmi = (map_)->vmi, \ 62 .start = (map_)->addr, \ 63 .end = (map_)->end, \ 64 .vma_flags = (map_)->vma_flags, \ 65 .pgoff = (map_)->pgoff, \ 66 .file = (map_)->file, \ 67 .prev = (map_)->prev, \ 68 .middle = vma_, \ 69 .next = (vma_) ? NULL : (map_)->next, \ 70 .state = VMA_MERGE_START, \ 71 } 72 73 /* Was this VMA ever forked from a parent, i.e. maybe contains CoW mappings? */ 74 static bool vma_is_fork_child(struct vm_area_struct *vma) 75 { 76 /* 77 * The list_is_singular() test is to avoid merging VMA cloned from 78 * parents. This can improve scalability caused by the anon_vma root 79 * lock. 80 */ 81 return vma && vma->anon_vma && !list_is_singular(&vma->anon_vma_chain); 82 } 83 84 static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next) 85 { 86 struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev; 87 vma_flags_t diff; 88 89 if (!mpol_equal(vmg->policy, vma_policy(vma))) 90 return false; 91 92 diff = vma_flags_diff_pair(&vma->flags, &vmg->vma_flags); 93 vma_flags_clear_mask(&diff, VMA_IGNORE_MERGE_FLAGS); 94 95 if (!vma_flags_empty(&diff)) 96 return false; 97 if (vma->vm_file != vmg->file) 98 return false; 99 if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx)) 100 return false; 101 if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name)) 102 return false; 103 return true; 104 } 105 106 static bool is_mergeable_anon_vma(struct vma_merge_struct *vmg, bool merge_next) 107 { 108 struct vm_area_struct *tgt = merge_next ? vmg->next : vmg->prev; 109 struct vm_area_struct *src = vmg->middle; /* existing merge case. */ 110 struct anon_vma *tgt_anon = tgt->anon_vma; 111 struct anon_vma *src_anon = vmg->anon_vma; 112 113 /* 114 * We _can_ have !src, vmg->anon_vma via copy_vma(). In this instance we 115 * will remove the existing VMA's anon_vma's so there's no scalability 116 * concerns. 117 */ 118 VM_WARN_ON(src && src_anon != src->anon_vma); 119 120 /* Case 1 - we will dup_anon_vma() from src into tgt. 
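	 * A rough sketch of what a successful merge would then do: tgt is
	 * unfaulted, so dup_anon_vma() would set tgt->anon_vma = src->anon_vma
	 * and clone src's anon_vma_chain onto tgt. That is only worthwhile if
	 * neither src nor the mremap() copied-from VMA carries a forked
	 * (multi-entry) anon_vma_chain, hence the vma_is_fork_child() checks
	 * below; otherwise we would pile yet more VMAs onto a shared anon_vma
	 * root lock.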
*/ 121 if (!tgt_anon && src_anon) { 122 struct vm_area_struct *copied_from = vmg->copied_from; 123 124 if (vma_is_fork_child(src)) 125 return false; 126 if (vma_is_fork_child(copied_from)) 127 return false; 128 129 return true; 130 } 131 /* Case 2 - we will simply use tgt's anon_vma. */ 132 if (tgt_anon && !src_anon) 133 return !vma_is_fork_child(tgt); 134 /* Case 3 - the anon_vma's are already shared. */ 135 return src_anon == tgt_anon; 136 } 137 138 /* 139 * init_multi_vma_prep() - Initializer for struct vma_prepare 140 * @vp: The vma_prepare struct 141 * @vma: The vma that will be altered once locked 142 * @vmg: The merge state that will be used to determine adjustment and VMA 143 * removal. 144 */ 145 static void init_multi_vma_prep(struct vma_prepare *vp, 146 struct vm_area_struct *vma, 147 struct vma_merge_struct *vmg) 148 { 149 struct vm_area_struct *adjust; 150 struct vm_area_struct **remove = &vp->remove; 151 152 memset(vp, 0, sizeof(struct vma_prepare)); 153 vp->vma = vma; 154 vp->anon_vma = vma->anon_vma; 155 156 if (vmg && vmg->__remove_middle) { 157 *remove = vmg->middle; 158 remove = &vp->remove2; 159 } 160 if (vmg && vmg->__remove_next) 161 *remove = vmg->next; 162 163 if (vmg && vmg->__adjust_middle_start) 164 adjust = vmg->middle; 165 else if (vmg && vmg->__adjust_next_start) 166 adjust = vmg->next; 167 else 168 adjust = NULL; 169 170 vp->adj_next = adjust; 171 if (!vp->anon_vma && adjust) 172 vp->anon_vma = adjust->anon_vma; 173 174 VM_WARN_ON(vp->anon_vma && adjust && adjust->anon_vma && 175 vp->anon_vma != adjust->anon_vma); 176 177 vp->file = vma->vm_file; 178 if (vp->file) 179 vp->mapping = vma->vm_file->f_mapping; 180 181 if (vmg && vmg->skip_vma_uprobe) 182 vp->skip_vma_uprobe = true; 183 } 184 185 /* 186 * Return true if we can merge this (vma_flags,anon_vma,file,vm_pgoff) 187 * in front of (at a lower virtual address and file offset than) the vma. 188 * 189 * We cannot merge two vmas if they have differently assigned (non-NULL) 190 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. 191 * 192 * We don't check here for the merged mmap wrapping around the end of pagecache 193 * indices (16TB on ia32) because do_mmap() does not permit mmap's which 194 * wrap, nor mmaps which cover the final page at index -1UL. 195 * 196 * We assume the vma may be removed as part of the merge. 197 */ 198 static bool can_vma_merge_before(struct vma_merge_struct *vmg) 199 { 200 pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start); 201 202 if (is_mergeable_vma(vmg, /* merge_next = */ true) && 203 is_mergeable_anon_vma(vmg, /* merge_next = */ true)) { 204 if (vmg->next->vm_pgoff == vmg->pgoff + pglen) 205 return true; 206 } 207 208 return false; 209 } 210 211 /* 212 * Return true if we can merge this (vma_flags,anon_vma,file,vm_pgoff) 213 * beyond (at a higher virtual address and file offset than) the vma. 214 * 215 * We cannot merge two vmas if they have differently assigned (non-NULL) 216 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. 217 * 218 * We assume that vma is not removed as part of the merge. 
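 *
 * Illustrative example of the offset check: if prev maps [0x1000, 0x3000)
 * of a file with vm_pgoff 0, then vma_pages(prev) == 2, so a proposed range
 * starting at prev->vm_end can only merge after prev when vmg->pgoff == 2,
 * i.e. when the combined mapping remains linear within the file.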
219 */ 220 static bool can_vma_merge_after(struct vma_merge_struct *vmg) 221 { 222 if (is_mergeable_vma(vmg, /* merge_next = */ false) && 223 is_mergeable_anon_vma(vmg, /* merge_next = */ false)) { 224 if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff) 225 return true; 226 } 227 return false; 228 } 229 230 static void __vma_link_file(struct vm_area_struct *vma, 231 struct address_space *mapping) 232 { 233 if (vma_is_shared_maywrite(vma)) 234 mapping_allow_writable(mapping); 235 236 flush_dcache_mmap_lock(mapping); 237 vma_interval_tree_insert(vma, &mapping->i_mmap); 238 flush_dcache_mmap_unlock(mapping); 239 } 240 241 /* 242 * Requires inode->i_mapping->i_mmap_rwsem 243 */ 244 static void __remove_shared_vm_struct(struct vm_area_struct *vma, 245 struct address_space *mapping) 246 { 247 if (vma_is_shared_maywrite(vma)) 248 mapping_unmap_writable(mapping); 249 250 flush_dcache_mmap_lock(mapping); 251 vma_interval_tree_remove(vma, &mapping->i_mmap); 252 flush_dcache_mmap_unlock(mapping); 253 } 254 255 /* 256 * vma has some anon_vma assigned, and is already inserted on that 257 * anon_vma's interval trees. 258 * 259 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the 260 * vma must be removed from the anon_vma's interval trees using 261 * anon_vma_interval_tree_pre_update_vma(). 262 * 263 * After the update, the vma will be reinserted using 264 * anon_vma_interval_tree_post_update_vma(). 265 * 266 * The entire update must be protected by exclusive mmap_lock and by 267 * the root anon_vma's mutex. 268 */ 269 static void 270 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) 271 { 272 struct anon_vma_chain *avc; 273 274 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 275 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); 276 } 277 278 static void 279 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) 280 { 281 struct anon_vma_chain *avc; 282 283 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 284 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root); 285 } 286 287 /* 288 * vma_prepare() - Helper function for handling locking VMAs prior to altering 289 * @vp: The initialized vma_prepare struct 290 */ 291 static void vma_prepare(struct vma_prepare *vp) 292 { 293 if (vp->file) { 294 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end); 295 296 if (vp->adj_next) 297 uprobe_munmap(vp->adj_next, vp->adj_next->vm_start, 298 vp->adj_next->vm_end); 299 300 i_mmap_lock_write(vp->mapping); 301 if (vp->insert && vp->insert->vm_file) { 302 /* 303 * Put into interval tree now, so instantiated pages 304 * are visible to arm/parisc __flush_dcache_page 305 * throughout; but we cannot insert into address 306 * space until vma start or end is updated. 307 */ 308 __vma_link_file(vp->insert, 309 vp->insert->vm_file->f_mapping); 310 } 311 } 312 313 if (vp->anon_vma) { 314 anon_vma_lock_write(vp->anon_vma); 315 anon_vma_interval_tree_pre_update_vma(vp->vma); 316 if (vp->adj_next) 317 anon_vma_interval_tree_pre_update_vma(vp->adj_next); 318 } 319 320 if (vp->file) { 321 flush_dcache_mmap_lock(vp->mapping); 322 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap); 323 if (vp->adj_next) 324 vma_interval_tree_remove(vp->adj_next, 325 &vp->mapping->i_mmap); 326 } 327 328 } 329 330 /* 331 * vma_complete- Helper function for handling the unlocking after altering VMAs, 332 * or for inserting a VMA. 
333 * 334 * @vp: The vma_prepare struct 335 * @vmi: The vma iterator 336 * @mm: The mm_struct 337 */ 338 static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi, 339 struct mm_struct *mm) 340 { 341 if (vp->file) { 342 if (vp->adj_next) 343 vma_interval_tree_insert(vp->adj_next, 344 &vp->mapping->i_mmap); 345 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap); 346 flush_dcache_mmap_unlock(vp->mapping); 347 } 348 349 if (vp->remove && vp->file) { 350 __remove_shared_vm_struct(vp->remove, vp->mapping); 351 if (vp->remove2) 352 __remove_shared_vm_struct(vp->remove2, vp->mapping); 353 } else if (vp->insert) { 354 /* 355 * split_vma has split insert from vma, and needs 356 * us to insert it before dropping the locks 357 * (it may either follow vma or precede it). 358 */ 359 vma_iter_store_new(vmi, vp->insert); 360 mm->map_count++; 361 } 362 363 if (vp->anon_vma) { 364 anon_vma_interval_tree_post_update_vma(vp->vma); 365 if (vp->adj_next) 366 anon_vma_interval_tree_post_update_vma(vp->adj_next); 367 anon_vma_unlock_write(vp->anon_vma); 368 } 369 370 if (vp->file) { 371 i_mmap_unlock_write(vp->mapping); 372 373 if (!vp->skip_vma_uprobe) { 374 uprobe_mmap(vp->vma); 375 376 if (vp->adj_next) 377 uprobe_mmap(vp->adj_next); 378 } 379 } 380 381 if (vp->remove) { 382 again: 383 vma_mark_detached(vp->remove); 384 if (vp->file) { 385 uprobe_munmap(vp->remove, vp->remove->vm_start, 386 vp->remove->vm_end); 387 fput(vp->file); 388 } 389 if (vp->remove->anon_vma) 390 unlink_anon_vmas(vp->remove); 391 mm->map_count--; 392 mpol_put(vma_policy(vp->remove)); 393 if (!vp->remove2) 394 WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end); 395 vm_area_free(vp->remove); 396 397 /* 398 * In mprotect's case 6 (see comments on vma_merge), 399 * we are removing both mid and next vmas 400 */ 401 if (vp->remove2) { 402 vp->remove = vp->remove2; 403 vp->remove2 = NULL; 404 goto again; 405 } 406 } 407 if (vp->insert && vp->file) 408 uprobe_mmap(vp->insert); 409 } 410 411 /* 412 * init_vma_prep() - Initializer wrapper for vma_prepare struct 413 * @vp: The vma_prepare struct 414 * @vma: The vma that will be altered once locked 415 */ 416 static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma) 417 { 418 init_multi_vma_prep(vp, vma, NULL); 419 } 420 421 /* 422 * Can the proposed VMA be merged with the left (previous) VMA taking into 423 * account the start position of the proposed range. 424 */ 425 static bool can_vma_merge_left(struct vma_merge_struct *vmg) 426 427 { 428 return vmg->prev && vmg->prev->vm_end == vmg->start && 429 can_vma_merge_after(vmg); 430 } 431 432 /* 433 * Can the proposed VMA be merged with the right (next) VMA taking into 434 * account the end position of the proposed range. 435 * 436 * In addition, if we can merge with the left VMA, ensure that left and right 437 * anon_vma's are also compatible. 438 */ 439 static bool can_vma_merge_right(struct vma_merge_struct *vmg, 440 bool can_merge_left) 441 { 442 struct vm_area_struct *next = vmg->next; 443 struct vm_area_struct *prev; 444 445 if (!next || vmg->end != next->vm_start || !can_vma_merge_before(vmg)) 446 return false; 447 448 if (!can_merge_left) 449 return true; 450 451 /* 452 * If we can merge with prev (left) and next (right), indicating that 453 * each VMA's anon_vma is compatible with the proposed anon_vma, this 454 * does not mean prev and next are compatible with EACH OTHER. 455 * 456 * We therefore check this in addition to mergeability to either side. 
457 */ 458 prev = vmg->prev; 459 return !prev->anon_vma || !next->anon_vma || 460 prev->anon_vma == next->anon_vma; 461 } 462 463 /* 464 * Close a vm structure and free it. 465 */ 466 void remove_vma(struct vm_area_struct *vma) 467 { 468 might_sleep(); 469 vma_close(vma); 470 if (vma->vm_file) 471 fput(vma->vm_file); 472 mpol_put(vma_policy(vma)); 473 vm_area_free(vma); 474 } 475 476 /* 477 * Get rid of page table information in the indicated region. 478 * 479 * Called with the mm semaphore held. 480 */ 481 void unmap_region(struct unmap_desc *unmap) 482 { 483 struct mm_struct *mm = unmap->first->vm_mm; 484 struct mmu_gather tlb; 485 486 tlb_gather_mmu(&tlb, mm); 487 update_hiwater_rss(mm); 488 unmap_vmas(&tlb, unmap); 489 mas_set(unmap->mas, unmap->tree_reset); 490 free_pgtables(&tlb, unmap); 491 tlb_finish_mmu(&tlb); 492 } 493 494 /* 495 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it 496 * has already been checked or doesn't make sense to fail. 497 * VMA Iterator will point to the original VMA. 498 */ 499 static __must_check int 500 __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, 501 unsigned long addr, int new_below) 502 { 503 struct vma_prepare vp; 504 struct vm_area_struct *new; 505 int err; 506 507 WARN_ON(vma->vm_start >= addr); 508 WARN_ON(vma->vm_end <= addr); 509 510 if (vma->vm_ops && vma->vm_ops->may_split) { 511 err = vma->vm_ops->may_split(vma, addr); 512 if (err) 513 return err; 514 } 515 516 new = vm_area_dup(vma); 517 if (!new) 518 return -ENOMEM; 519 520 if (new_below) { 521 new->vm_end = addr; 522 } else { 523 new->vm_start = addr; 524 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); 525 } 526 527 err = -ENOMEM; 528 vma_iter_config(vmi, new->vm_start, new->vm_end); 529 if (vma_iter_prealloc(vmi, new)) 530 goto out_free_vma; 531 532 err = vma_dup_policy(vma, new); 533 if (err) 534 goto out_free_vmi; 535 536 err = anon_vma_clone(new, vma, VMA_OP_SPLIT); 537 if (err) 538 goto out_free_mpol; 539 540 if (new->vm_file) 541 get_file(new->vm_file); 542 543 if (new->vm_ops && new->vm_ops->open) 544 new->vm_ops->open(new); 545 546 vma_start_write(vma); 547 vma_start_write(new); 548 549 init_vma_prep(&vp, vma); 550 vp.insert = new; 551 vma_prepare(&vp); 552 553 /* 554 * Get rid of huge pages and shared page tables straddling the split 555 * boundary. 556 */ 557 vma_adjust_trans_huge(vma, vma->vm_start, addr, NULL); 558 if (is_vm_hugetlb_page(vma)) 559 hugetlb_split(vma, addr); 560 561 if (new_below) { 562 vma->vm_start = addr; 563 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT; 564 } else { 565 vma->vm_end = addr; 566 } 567 568 /* vma_complete stores the new vma */ 569 vma_complete(&vp, vmi, vma->vm_mm); 570 validate_mm(vma->vm_mm); 571 572 /* Success. */ 573 if (new_below) 574 vma_next(vmi); 575 else 576 vma_prev(vmi); 577 578 return 0; 579 580 out_free_mpol: 581 mpol_put(vma_policy(new)); 582 out_free_vmi: 583 vma_iter_free(vmi); 584 out_free_vma: 585 vm_area_free(new); 586 return err; 587 } 588 589 /* 590 * Split a vma into two pieces at address 'addr', a new vma is allocated 591 * either for the first part or the tail. 
592 */ 593 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, 594 unsigned long addr, int new_below) 595 { 596 if (vma->vm_mm->map_count >= get_sysctl_max_map_count()) 597 return -ENOMEM; 598 599 return __split_vma(vmi, vma, addr, new_below); 600 } 601 602 /* 603 * dup_anon_vma() - Helper function to duplicate anon_vma on VMA merge in the 604 * instance that the destination VMA has no anon_vma but the source does. 605 * 606 * @dst: The destination VMA 607 * @src: The source VMA 608 * @dup: Pointer to the destination VMA when successful. 609 * 610 * Returns: 0 on success. 611 */ 612 static int dup_anon_vma(struct vm_area_struct *dst, 613 struct vm_area_struct *src, struct vm_area_struct **dup) 614 { 615 /* 616 * There are three cases to consider for correctly propagating 617 * anon_vma's on merge. 618 * 619 * The first is trivial - neither VMA has anon_vma, we need not do 620 * anything. 621 * 622 * The second where both have anon_vma is also a no-op, as they must 623 * then be the same, so there is simply nothing to copy. 624 * 625 * Here we cover the third - if the destination VMA has no anon_vma, 626 * that is it is unfaulted, we need to ensure that the newly merged 627 * range is referenced by the anon_vma's of the source. 628 */ 629 if (src->anon_vma && !dst->anon_vma) { 630 int ret; 631 632 vma_assert_write_locked(dst); 633 dst->anon_vma = src->anon_vma; 634 ret = anon_vma_clone(dst, src, VMA_OP_MERGE_UNFAULTED); 635 if (ret) 636 return ret; 637 638 *dup = dst; 639 } 640 641 return 0; 642 } 643 644 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE 645 void validate_mm(struct mm_struct *mm) 646 { 647 int bug = 0; 648 int i = 0; 649 struct vm_area_struct *vma; 650 VMA_ITERATOR(vmi, mm, 0); 651 652 mt_validate(&mm->mm_mt); 653 for_each_vma(vmi, vma) { 654 #ifdef CONFIG_DEBUG_VM_RB 655 struct anon_vma *anon_vma = vma->anon_vma; 656 struct anon_vma_chain *avc; 657 #endif 658 unsigned long vmi_start, vmi_end; 659 bool warn = 0; 660 661 vmi_start = vma_iter_addr(&vmi); 662 vmi_end = vma_iter_end(&vmi); 663 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm)) 664 warn = 1; 665 666 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm)) 667 warn = 1; 668 669 if (warn) { 670 pr_emerg("issue in %s\n", current->comm); 671 dump_stack(); 672 dump_vma(vma); 673 pr_emerg("tree range: %px start %lx end %lx\n", vma, 674 vmi_start, vmi_end - 1); 675 vma_iter_dump_tree(&vmi); 676 } 677 678 #ifdef CONFIG_DEBUG_VM_RB 679 if (anon_vma) { 680 anon_vma_lock_read(anon_vma); 681 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 682 anon_vma_interval_tree_verify(avc); 683 anon_vma_unlock_read(anon_vma); 684 } 685 #endif 686 /* Check for a infinite loop */ 687 if (++i > mm->map_count + 10) { 688 i = -1; 689 break; 690 } 691 } 692 if (i != mm->map_count) { 693 pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i); 694 bug = 1; 695 } 696 VM_BUG_ON_MM(bug, mm); 697 } 698 #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ 699 700 /* 701 * Based on the vmg flag indicating whether we need to adjust the vm_start field 702 * for the middle or next VMA, we calculate what the range of the newly adjusted 703 * VMA ought to be, and set the VMA's range accordingly. 
704 */ 705 static void vmg_adjust_set_range(struct vma_merge_struct *vmg) 706 { 707 struct vm_area_struct *adjust; 708 pgoff_t pgoff; 709 710 if (vmg->__adjust_middle_start) { 711 adjust = vmg->middle; 712 pgoff = adjust->vm_pgoff + PHYS_PFN(vmg->end - adjust->vm_start); 713 } else if (vmg->__adjust_next_start) { 714 adjust = vmg->next; 715 pgoff = adjust->vm_pgoff - PHYS_PFN(adjust->vm_start - vmg->end); 716 } else { 717 return; 718 } 719 720 vma_set_range(adjust, vmg->end, adjust->vm_end, pgoff); 721 } 722 723 /* 724 * Actually perform the VMA merge operation. 725 * 726 * IMPORTANT: We guarantee that, should vmg->give_up_on_oom is set, to not 727 * modify any VMAs or cause inconsistent state should an OOM condition arise. 728 * 729 * Returns 0 on success, or an error value on failure. 730 */ 731 static int commit_merge(struct vma_merge_struct *vmg) 732 { 733 struct vm_area_struct *vma; 734 struct vma_prepare vp; 735 736 if (vmg->__adjust_next_start) { 737 /* We manipulate middle and adjust next, which is the target. */ 738 vma = vmg->middle; 739 vma_iter_config(vmg->vmi, vmg->end, vmg->next->vm_end); 740 } else { 741 vma = vmg->target; 742 /* Note: vma iterator must be pointing to 'start'. */ 743 vma_iter_config(vmg->vmi, vmg->start, vmg->end); 744 } 745 746 init_multi_vma_prep(&vp, vma, vmg); 747 748 /* 749 * If vmg->give_up_on_oom is set, we're safe, because we don't actually 750 * manipulate any VMAs until we succeed at preallocation. 751 * 752 * Past this point, we will not return an error. 753 */ 754 if (vma_iter_prealloc(vmg->vmi, vma)) 755 return -ENOMEM; 756 757 vma_prepare(&vp); 758 /* 759 * THP pages may need to do additional splits if we increase 760 * middle->vm_start. 761 */ 762 vma_adjust_trans_huge(vma, vmg->start, vmg->end, 763 vmg->__adjust_middle_start ? vmg->middle : NULL); 764 vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff); 765 vmg_adjust_set_range(vmg); 766 vma_iter_store_overwrite(vmg->vmi, vmg->target); 767 768 vma_complete(&vp, vmg->vmi, vma->vm_mm); 769 770 return 0; 771 } 772 773 /* We can only remove VMAs when merging if they do not have a close hook. */ 774 static bool can_merge_remove_vma(struct vm_area_struct *vma) 775 { 776 return !vma->vm_ops || !vma->vm_ops->close; 777 } 778 779 /* 780 * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its 781 * attributes modified. 782 * 783 * @vmg: Describes the modifications being made to a VMA and associated 784 * metadata. 785 * 786 * When the attributes of a range within a VMA change, then it might be possible 787 * for immediately adjacent VMAs to be merged into that VMA due to having 788 * identical properties. 789 * 790 * This function checks for the existence of any such mergeable VMAs and updates 791 * the maple tree describing the @vmg->middle->vm_mm address space to account 792 * for this, as well as any VMAs shrunk/expanded/deleted as a result of this 793 * merge. 794 * 795 * As part of this operation, if a merge occurs, the @vmg object will have its 796 * vma, start, end, and pgoff fields modified to execute the merge. Subsequent 797 * calls to this function should reset these fields. 798 * 799 * Returns: The merged VMA if merge succeeds, or NULL otherwise. 800 * 801 * ASSUMPTIONS: 802 * - The caller must assign the VMA to be modified to @vmg->middle. 803 * - The caller must have set @vmg->prev to the previous VMA, if there is one. 804 * - The caller must not set @vmg->next, as we determine this. 805 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock. 
806 * - vmi must be positioned within [@vmg->middle->vm_start, @vmg->middle->vm_end). 807 */ 808 static __must_check struct vm_area_struct *vma_merge_existing_range( 809 struct vma_merge_struct *vmg) 810 { 811 vma_flags_t sticky_flags = vma_flags_and_mask(&vmg->vma_flags, 812 VMA_STICKY_FLAGS); 813 struct vm_area_struct *middle = vmg->middle; 814 struct vm_area_struct *prev = vmg->prev; 815 struct vm_area_struct *next; 816 struct vm_area_struct *anon_dup = NULL; 817 unsigned long start = vmg->start; 818 unsigned long end = vmg->end; 819 bool left_side = middle && start == middle->vm_start; 820 bool right_side = middle && end == middle->vm_end; 821 int err = 0; 822 bool merge_left, merge_right, merge_both; 823 824 mmap_assert_write_locked(vmg->mm); 825 VM_WARN_ON_VMG(!middle, vmg); /* We are modifying a VMA, so caller must specify. */ 826 VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. */ 827 VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg); 828 VM_WARN_ON_VMG(start >= end, vmg); 829 830 /* 831 * If middle == prev, then we are offset into a VMA. Otherwise, if we are 832 * not, we must span a portion of the VMA. 833 */ 834 VM_WARN_ON_VMG(middle && 835 ((middle != prev && vmg->start != middle->vm_start) || 836 vmg->end > middle->vm_end), vmg); 837 /* The vmi must be positioned within vmg->middle. */ 838 VM_WARN_ON_VMG(middle && 839 !(vma_iter_addr(vmg->vmi) >= middle->vm_start && 840 vma_iter_addr(vmg->vmi) < middle->vm_end), vmg); 841 /* An existing merge can never be used by the mremap() logic. */ 842 VM_WARN_ON_VMG(vmg->copied_from, vmg); 843 844 vmg->state = VMA_MERGE_NOMERGE; 845 846 /* 847 * If a special mapping or if the range being modified is neither at the 848 * furthermost left or right side of the VMA, then we have no chance of 849 * merging and should abort. 850 */ 851 if (vma_flags_test_any_mask(&vmg->vma_flags, VMA_SPECIAL_FLAGS) || 852 (!left_side && !right_side)) 853 return NULL; 854 855 if (left_side) 856 merge_left = can_vma_merge_left(vmg); 857 else 858 merge_left = false; 859 860 if (right_side) { 861 next = vmg->next = vma_iter_next_range(vmg->vmi); 862 vma_iter_prev_range(vmg->vmi); 863 864 merge_right = can_vma_merge_right(vmg, merge_left); 865 } else { 866 merge_right = false; 867 next = NULL; 868 } 869 870 if (merge_left) /* If merging prev, position iterator there. */ 871 vma_prev(vmg->vmi); 872 else if (!merge_right) /* If we have nothing to merge, abort. */ 873 return NULL; 874 875 merge_both = merge_left && merge_right; 876 /* If we span the entire VMA, a merge implies it will be deleted. */ 877 vmg->__remove_middle = left_side && right_side; 878 879 /* 880 * If we need to remove middle in its entirety but are unable to do so, 881 * we have no sensible recourse but to abort the merge. 882 */ 883 if (vmg->__remove_middle && !can_merge_remove_vma(middle)) 884 return NULL; 885 886 /* 887 * If we merge both VMAs, then next is also deleted. This implies 888 * merge_will_delete_vma also. 889 */ 890 vmg->__remove_next = merge_both; 891 892 /* 893 * If we cannot delete next, then we can reduce the operation to merging 894 * prev and middle (thereby deleting middle). 895 */ 896 if (vmg->__remove_next && !can_merge_remove_vma(next)) { 897 vmg->__remove_next = false; 898 merge_right = false; 899 merge_both = false; 900 } 901 902 /* No matter what happens, we will be adjusting middle. 
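	 * Whether middle ends up shrunk, having its start moved, or removed
	 * outright, its fields and/or maple tree entry will change, so take
	 * its VMA write lock before handling the cases below.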
*/ 903 vma_start_write(middle); 904 905 if (merge_right) { 906 vma_flags_t next_sticky; 907 908 vma_start_write(next); 909 vmg->target = next; 910 next_sticky = vma_flags_and_mask(&next->flags, VMA_STICKY_FLAGS); 911 vma_flags_set_mask(&sticky_flags, next_sticky); 912 } 913 914 if (merge_left) { 915 vma_flags_t prev_sticky; 916 917 vma_start_write(prev); 918 vmg->target = prev; 919 920 prev_sticky = vma_flags_and_mask(&prev->flags, VMA_STICKY_FLAGS); 921 vma_flags_set_mask(&sticky_flags, prev_sticky); 922 } 923 924 if (merge_both) { 925 /* 926 * |<-------------------->| 927 * |-------********-------| 928 * prev middle next 929 * extend delete delete 930 */ 931 932 vmg->start = prev->vm_start; 933 vmg->end = next->vm_end; 934 vmg->pgoff = prev->vm_pgoff; 935 936 /* 937 * We already ensured anon_vma compatibility above, so now it's 938 * simply a case of, if prev has no anon_vma object, which of 939 * next or middle contains the anon_vma we must duplicate. 940 */ 941 err = dup_anon_vma(prev, next->anon_vma ? next : middle, 942 &anon_dup); 943 } else if (merge_left) { 944 /* 945 * |<------------>| OR 946 * |<----------------->| 947 * |-------************* 948 * prev middle 949 * extend shrink/delete 950 */ 951 952 vmg->start = prev->vm_start; 953 vmg->pgoff = prev->vm_pgoff; 954 955 if (!vmg->__remove_middle) 956 vmg->__adjust_middle_start = true; 957 958 err = dup_anon_vma(prev, middle, &anon_dup); 959 } else { /* merge_right */ 960 /* 961 * |<------------->| OR 962 * |<----------------->| 963 * *************-------| 964 * middle next 965 * shrink/delete extend 966 */ 967 968 pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start); 969 970 VM_WARN_ON_VMG(!merge_right, vmg); 971 /* If we are offset into a VMA, then prev must be middle. */ 972 VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg); 973 974 if (vmg->__remove_middle) { 975 vmg->end = next->vm_end; 976 vmg->pgoff = next->vm_pgoff - pglen; 977 } else { 978 /* We shrink middle and expand next. */ 979 vmg->__adjust_next_start = true; 980 vmg->start = middle->vm_start; 981 vmg->end = start; 982 vmg->pgoff = middle->vm_pgoff; 983 } 984 985 err = dup_anon_vma(next, middle, &anon_dup); 986 } 987 988 if (err || commit_merge(vmg)) 989 goto abort; 990 991 vma_set_flags_mask(vmg->target, sticky_flags); 992 khugepaged_enter_vma(vmg->target, vmg->vm_flags); 993 vmg->state = VMA_MERGE_SUCCESS; 994 return vmg->target; 995 996 abort: 997 vma_iter_set(vmg->vmi, start); 998 vma_iter_load(vmg->vmi); 999 1000 if (anon_dup) 1001 unlink_anon_vmas(anon_dup); 1002 1003 /* 1004 * This means we have failed to clone anon_vma's correctly, but no 1005 * actual changes to VMAs have occurred, so no harm no foul - if the 1006 * user doesn't want this reported and instead just wants to give up on 1007 * the merge, allow it. 1008 */ 1009 if (!vmg->give_up_on_oom) 1010 vmg->state = VMA_MERGE_ERROR_NOMEM; 1011 return NULL; 1012 } 1013 1014 /* 1015 * vma_merge_new_range - Attempt to merge a new VMA into address space 1016 * 1017 * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end 1018 * (exclusive), which we try to merge with any adjacent VMAs if possible. 1019 * 1020 * We are about to add a VMA to the address space starting at @vmg->start and 1021 * ending at @vmg->end. There are three different possible scenarios: 1022 * 1023 * 1. 
There is a VMA with identical properties immediately adjacent to the 1024 * proposed new VMA [@vmg->start, @vmg->end) either before or after it - 1025 * EXPAND that VMA: 1026 * 1027 * Proposed: |-----| or |-----| 1028 * Existing: |----| |----| 1029 * 1030 * 2. There are VMAs with identical properties immediately adjacent to the 1031 * proposed new VMA [@vmg->start, @vmg->end) both before AND after it - 1032 * EXPAND the former and REMOVE the latter: 1033 * 1034 * Proposed: |-----| 1035 * Existing: |----| |----| 1036 * 1037 * 3. There are no VMAs immediately adjacent to the proposed new VMA or those 1038 * VMAs do not have identical attributes - NO MERGE POSSIBLE. 1039 * 1040 * In instances where we can merge, this function returns the expanded VMA which 1041 * will have its range adjusted accordingly and the underlying maple tree also 1042 * adjusted. 1043 * 1044 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer 1045 * to the VMA we expanded. 1046 * 1047 * This function adjusts @vmg to provide @vmg->next if not already specified, 1048 * and adjusts [@vmg->start, @vmg->end) to span the expanded range. 1049 * 1050 * ASSUMPTIONS: 1051 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock. 1052 * - The caller must have determined that [@vmg->start, @vmg->end) is empty, 1053 other than VMAs that will be unmapped should the operation succeed. 1054 * - The caller must have specified the previous vma in @vmg->prev. 1055 * - The caller must have specified the next vma in @vmg->next. 1056 * - The caller must have positioned the vmi at or before the gap. 1057 */ 1058 struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) 1059 { 1060 struct vm_area_struct *prev = vmg->prev; 1061 struct vm_area_struct *next = vmg->next; 1062 unsigned long end = vmg->end; 1063 bool can_merge_left, can_merge_right; 1064 1065 mmap_assert_write_locked(vmg->mm); 1066 VM_WARN_ON_VMG(vmg->middle, vmg); 1067 VM_WARN_ON_VMG(vmg->target, vmg); 1068 /* vmi must point at or before the gap. */ 1069 VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg); 1070 1071 vmg->state = VMA_MERGE_NOMERGE; 1072 1073 /* Special VMAs are unmergeable, also if no prev/next. */ 1074 if (vma_flags_test_any_mask(&vmg->vma_flags, VMA_SPECIAL_FLAGS) || 1075 (!prev && !next)) 1076 return NULL; 1077 1078 can_merge_left = can_vma_merge_left(vmg); 1079 can_merge_right = !vmg->just_expand && can_vma_merge_right(vmg, can_merge_left); 1080 1081 /* If we can merge with the next VMA, adjust vmg accordingly. */ 1082 if (can_merge_right) { 1083 vmg->end = next->vm_end; 1084 vmg->target = next; 1085 } 1086 1087 /* If we can merge with the previous VMA, adjust vmg accordingly. */ 1088 if (can_merge_left) { 1089 vmg->start = prev->vm_start; 1090 vmg->target = prev; 1091 vmg->pgoff = prev->vm_pgoff; 1092 1093 /* 1094 * If this merge would result in removal of the next VMA but we 1095 * are not permitted to do so, reduce the operation to merging 1096 * prev and vma. 1097 */ 1098 if (can_merge_right && !can_merge_remove_vma(next)) 1099 vmg->end = end; 1100 1101 /* In expand-only case we are already positioned at prev. */ 1102 if (!vmg->just_expand) { 1103 /* Equivalent to going to the previous range. */ 1104 vma_prev(vmg->vmi); 1105 } 1106 } 1107 1108 /* 1109 * Now try to expand adjacent VMA(s). This takes care of removing the 1110 * following VMA if we have VMAs on both sides. 
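	 * vmg->target now points at the VMA to expand (next for a right-only
	 * merge, otherwise prev), with vmg->start/end/pgoff describing the
	 * expanded range, so a single vma_expand() call performs the whole
	 * merge.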
1111 */ 1112 if (vmg->target && !vma_expand(vmg)) { 1113 khugepaged_enter_vma(vmg->target, vmg->vm_flags); 1114 vmg->state = VMA_MERGE_SUCCESS; 1115 return vmg->target; 1116 } 1117 1118 return NULL; 1119 } 1120 1121 /* 1122 * vma_merge_copied_range - Attempt to merge a VMA that is being copied by 1123 * mremap() 1124 * 1125 * @vmg: Describes the VMA we are adding, in the copied-to range @vmg->start to 1126 * @vmg->end (exclusive), which we try to merge with any adjacent VMAs if 1127 * possible. 1128 * 1129 * vmg->prev, next, start, end, pgoff should all be relative to the COPIED TO 1130 * range, i.e. the target range for the VMA. 1131 * 1132 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer 1133 * to the VMA we expanded. 1134 * 1135 * ASSUMPTIONS: Same as vma_merge_new_range(), except vmg->middle must contain 1136 * the copied-from VMA. 1137 */ 1138 static struct vm_area_struct *vma_merge_copied_range(struct vma_merge_struct *vmg) 1139 { 1140 /* We must have a copied-from VMA. */ 1141 VM_WARN_ON_VMG(!vmg->middle, vmg); 1142 1143 vmg->copied_from = vmg->middle; 1144 vmg->middle = NULL; 1145 return vma_merge_new_range(vmg); 1146 } 1147 1148 /* 1149 * vma_expand - Expand an existing VMA 1150 * 1151 * @vmg: Describes a VMA expansion operation. 1152 * 1153 * Expand @vma to vmg->start and vmg->end. Can expand off the start and end. 1154 * Will expand over vmg->next if it's different from vmg->target and vmg->end == 1155 * vmg->next->vm_end. Checking if the vmg->target can expand and merge with 1156 * vmg->next needs to be handled by the caller. 1157 * 1158 * Returns: 0 on success. 1159 * 1160 * ASSUMPTIONS: 1161 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock. 1162 * - The caller must have set @vmg->target and @vmg->next. 1163 */ 1164 int vma_expand(struct vma_merge_struct *vmg) 1165 { 1166 struct vm_area_struct *anon_dup = NULL; 1167 struct vm_area_struct *target = vmg->target; 1168 struct vm_area_struct *next = vmg->next; 1169 bool remove_next = false; 1170 vma_flags_t sticky_flags = 1171 vma_flags_and_mask(&vmg->vma_flags, VMA_STICKY_FLAGS); 1172 vma_flags_t target_sticky; 1173 int ret = 0; 1174 1175 mmap_assert_write_locked(vmg->mm); 1176 vma_start_write(target); 1177 1178 target_sticky = vma_flags_and_mask(&target->flags, VMA_STICKY_FLAGS); 1179 1180 if (next && target != next && vmg->end == next->vm_end) 1181 remove_next = true; 1182 1183 /* We must have a target. */ 1184 VM_WARN_ON_VMG(!target, vmg); 1185 /* This should have already been checked by this point. */ 1186 VM_WARN_ON_VMG(remove_next && !can_merge_remove_vma(next), vmg); 1187 /* Not merging but overwriting any part of next is not handled. */ 1188 VM_WARN_ON_VMG(next && !remove_next && 1189 next != target && vmg->end > next->vm_start, vmg); 1190 /* Only handles expanding. */ 1191 VM_WARN_ON_VMG(target->vm_start < vmg->start || 1192 target->vm_end > vmg->end, vmg); 1193 1194 vma_flags_set_mask(&sticky_flags, target_sticky); 1195 1196 /* 1197 * If we are removing the next VMA or copying from a VMA 1198 * (e.g. mremap()'ing), we must propagate anon_vma state. 1199 * 1200 * Note that, by convention, callers ignore OOM for this case, so 1201 * we don't need to account for vmg->give_up_on_mm here. 
1202 */ 1203 if (remove_next) 1204 ret = dup_anon_vma(target, next, &anon_dup); 1205 if (!ret && vmg->copied_from) 1206 ret = dup_anon_vma(target, vmg->copied_from, &anon_dup); 1207 if (ret) 1208 return ret; 1209 1210 if (remove_next) { 1211 vma_flags_t next_sticky; 1212 1213 vma_start_write(next); 1214 vmg->__remove_next = true; 1215 1216 next_sticky = vma_flags_and_mask(&next->flags, VMA_STICKY_FLAGS); 1217 vma_flags_set_mask(&sticky_flags, next_sticky); 1218 } 1219 if (commit_merge(vmg)) 1220 goto nomem; 1221 1222 vma_set_flags_mask(target, sticky_flags); 1223 return 0; 1224 1225 nomem: 1226 if (anon_dup) 1227 unlink_anon_vmas(anon_dup); 1228 /* 1229 * If the user requests that we just give upon OOM, we are safe to do so 1230 * here, as commit merge provides this contract to us. Nothing has been 1231 * changed - no harm no foul, just don't report it. 1232 */ 1233 if (!vmg->give_up_on_oom) 1234 vmg->state = VMA_MERGE_ERROR_NOMEM; 1235 return -ENOMEM; 1236 } 1237 1238 /* 1239 * vma_shrink() - Reduce an existing VMAs memory area 1240 * @vmi: The vma iterator 1241 * @vma: The VMA to modify 1242 * @start: The new start 1243 * @end: The new end 1244 * 1245 * Returns: 0 on success, -ENOMEM otherwise 1246 */ 1247 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, 1248 unsigned long start, unsigned long end, pgoff_t pgoff) 1249 { 1250 struct vma_prepare vp; 1251 1252 WARN_ON((vma->vm_start != start) && (vma->vm_end != end)); 1253 1254 if (vma->vm_start < start) 1255 vma_iter_config(vmi, vma->vm_start, start); 1256 else 1257 vma_iter_config(vmi, end, vma->vm_end); 1258 1259 if (vma_iter_prealloc(vmi, NULL)) 1260 return -ENOMEM; 1261 1262 vma_start_write(vma); 1263 1264 init_vma_prep(&vp, vma); 1265 vma_prepare(&vp); 1266 vma_adjust_trans_huge(vma, start, end, NULL); 1267 1268 vma_iter_clear(vmi); 1269 vma_set_range(vma, start, end, pgoff); 1270 vma_complete(&vp, vmi, vma->vm_mm); 1271 validate_mm(vma->vm_mm); 1272 return 0; 1273 } 1274 1275 static inline void vms_clear_ptes(struct vma_munmap_struct *vms, 1276 struct ma_state *mas_detach, bool mm_wr_locked) 1277 { 1278 struct unmap_desc unmap = { 1279 .mas = mas_detach, 1280 .first = vms->vma, 1281 /* start and end may be different if there is no prev or next vma. */ 1282 .pg_start = vms->unmap_start, 1283 .pg_end = vms->unmap_end, 1284 .vma_start = vms->start, 1285 .vma_end = vms->end, 1286 /* 1287 * The tree limits and reset differ from the normal case since it's a 1288 * side-tree 1289 */ 1290 .tree_reset = 1, 1291 .tree_end = vms->vma_count, 1292 /* 1293 * We can free page tables without write-locking mmap_lock because VMAs 1294 * were isolated before we downgraded mmap_lock. 
1295 */ 1296 .mm_wr_locked = mm_wr_locked, 1297 }; 1298 1299 if (!vms->clear_ptes) /* Nothing to do */ 1300 return; 1301 1302 mas_set(mas_detach, 1); 1303 unmap_region(&unmap); 1304 vms->clear_ptes = false; 1305 } 1306 1307 static void vms_clean_up_area(struct vma_munmap_struct *vms, 1308 struct ma_state *mas_detach) 1309 { 1310 struct vm_area_struct *vma; 1311 1312 if (!vms->nr_pages) 1313 return; 1314 1315 vms_clear_ptes(vms, mas_detach, true); 1316 mas_set(mas_detach, 0); 1317 mas_for_each(mas_detach, vma, ULONG_MAX) 1318 vma_close(vma); 1319 } 1320 1321 /* 1322 * vms_complete_munmap_vmas() - Finish the munmap() operation 1323 * @vms: The vma munmap struct 1324 * @mas_detach: The maple state of the detached vmas 1325 * 1326 * This updates the mm_struct, unmaps the region, frees the resources 1327 * used for the munmap() and may downgrade the lock - if requested. Everything 1328 * needed to be done once the vma maple tree is updated. 1329 */ 1330 static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms, 1331 struct ma_state *mas_detach) 1332 { 1333 struct vm_area_struct *vma; 1334 struct mm_struct *mm; 1335 1336 mm = current->mm; 1337 mm->map_count -= vms->vma_count; 1338 mm->locked_vm -= vms->locked_vm; 1339 if (vms->unlock) 1340 mmap_write_downgrade(mm); 1341 1342 if (!vms->nr_pages) 1343 return; 1344 1345 vms_clear_ptes(vms, mas_detach, !vms->unlock); 1346 /* Update high watermark before we lower total_vm */ 1347 update_hiwater_vm(mm); 1348 /* Stat accounting */ 1349 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages); 1350 /* Paranoid bookkeeping */ 1351 VM_WARN_ON(vms->exec_vm > mm->exec_vm); 1352 VM_WARN_ON(vms->stack_vm > mm->stack_vm); 1353 VM_WARN_ON(vms->data_vm > mm->data_vm); 1354 mm->exec_vm -= vms->exec_vm; 1355 mm->stack_vm -= vms->stack_vm; 1356 mm->data_vm -= vms->data_vm; 1357 1358 /* Remove and clean up vmas */ 1359 mas_set(mas_detach, 0); 1360 mas_for_each(mas_detach, vma, ULONG_MAX) 1361 remove_vma(vma); 1362 1363 vm_unacct_memory(vms->nr_accounted); 1364 validate_mm(mm); 1365 if (vms->unlock) 1366 mmap_read_unlock(mm); 1367 1368 __mt_destroy(mas_detach->tree); 1369 } 1370 1371 /* 1372 * reattach_vmas() - Undo any munmap work and free resources 1373 * @mas_detach: The maple state with the detached maple tree 1374 * 1375 * Reattach any detached vmas and free up the maple tree used to track the vmas. 1376 */ 1377 static void reattach_vmas(struct ma_state *mas_detach) 1378 { 1379 struct vm_area_struct *vma; 1380 1381 mas_set(mas_detach, 0); 1382 mas_for_each(mas_detach, vma, ULONG_MAX) 1383 vma_mark_attached(vma); 1384 1385 __mt_destroy(mas_detach->tree); 1386 } 1387 1388 /* 1389 * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree 1390 * for removal at a later date. Handles splitting first and last if necessary 1391 * and marking the vmas as isolated. 1392 * 1393 * @vms: The vma munmap struct 1394 * @mas_detach: The maple state tracking the detached tree 1395 * 1396 * Return: 0 on success, error otherwise 1397 */ 1398 static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms, 1399 struct ma_state *mas_detach) 1400 { 1401 struct vm_area_struct *next = NULL; 1402 int error; 1403 1404 /* 1405 * If we need to split any vma, do it now to save pain later. 1406 * Does it split the first one? 
1407 */ 1408 if (vms->start > vms->vma->vm_start) { 1409 1410 /* 1411 * Make sure that map_count on return from munmap() will 1412 * not exceed its limit; but let map_count go just above 1413 * its limit temporarily, to help free resources as expected. 1414 */ 1415 if (vms->end < vms->vma->vm_end && 1416 vms->vma->vm_mm->map_count >= get_sysctl_max_map_count()) { 1417 error = -ENOMEM; 1418 goto map_count_exceeded; 1419 } 1420 1421 /* Don't bother splitting the VMA if we can't unmap it anyway */ 1422 if (vma_is_sealed(vms->vma)) { 1423 error = -EPERM; 1424 goto start_split_failed; 1425 } 1426 1427 error = __split_vma(vms->vmi, vms->vma, vms->start, 1); 1428 if (error) 1429 goto start_split_failed; 1430 } 1431 vms->prev = vma_prev(vms->vmi); 1432 if (vms->prev) 1433 vms->unmap_start = vms->prev->vm_end; 1434 1435 /* 1436 * Detach a range of VMAs from the mm. Using next as a temp variable as 1437 * it is always overwritten. 1438 */ 1439 for_each_vma_range(*(vms->vmi), next, vms->end) { 1440 long nrpages; 1441 1442 if (vma_is_sealed(next)) { 1443 error = -EPERM; 1444 goto modify_vma_failed; 1445 } 1446 /* Does it split the end? */ 1447 if (next->vm_end > vms->end) { 1448 error = __split_vma(vms->vmi, next, vms->end, 0); 1449 if (error) 1450 goto end_split_failed; 1451 } 1452 vma_start_write(next); 1453 mas_set(mas_detach, vms->vma_count++); 1454 error = mas_store_gfp(mas_detach, next, GFP_KERNEL); 1455 if (error) 1456 goto munmap_gather_failed; 1457 1458 vma_mark_detached(next); 1459 nrpages = vma_pages(next); 1460 1461 vms->nr_pages += nrpages; 1462 if (vma_test(next, VMA_LOCKED_BIT)) 1463 vms->locked_vm += nrpages; 1464 1465 if (vma_test(next, VMA_ACCOUNT_BIT)) 1466 vms->nr_accounted += nrpages; 1467 1468 if (is_exec_mapping(next->vm_flags)) 1469 vms->exec_vm += nrpages; 1470 else if (is_stack_mapping(next->vm_flags)) 1471 vms->stack_vm += nrpages; 1472 else if (is_data_mapping_vma_flags(&next->flags)) 1473 vms->data_vm += nrpages; 1474 1475 if (vms->uf) { 1476 /* 1477 * If userfaultfd_unmap_prep returns an error the vmas 1478 * will remain split, but userland will get a 1479 * highly unexpected error anyway. This is no 1480 * different than the case where the first of the two 1481 * __split_vma fails, but we don't undo the first 1482 * split, despite we could. This is unlikely enough 1483 * failure that it's not worth optimizing it for. 1484 */ 1485 error = userfaultfd_unmap_prep(next, vms->start, 1486 vms->end, vms->uf); 1487 if (error) 1488 goto userfaultfd_error; 1489 } 1490 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE 1491 BUG_ON(next->vm_start < vms->start); 1492 BUG_ON(next->vm_start > vms->end); 1493 #endif 1494 } 1495 1496 vms->next = vma_next(vms->vmi); 1497 if (vms->next) 1498 vms->unmap_end = vms->next->vm_start; 1499 1500 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) 1501 /* Make sure no VMAs are about to be lost. 
*/ 1502 { 1503 MA_STATE(test, mas_detach->tree, 0, 0); 1504 struct vm_area_struct *vma_mas, *vma_test; 1505 int test_count = 0; 1506 1507 vma_iter_set(vms->vmi, vms->start); 1508 rcu_read_lock(); 1509 vma_test = mas_find(&test, vms->vma_count - 1); 1510 for_each_vma_range(*(vms->vmi), vma_mas, vms->end) { 1511 BUG_ON(vma_mas != vma_test); 1512 test_count++; 1513 vma_test = mas_next(&test, vms->vma_count - 1); 1514 } 1515 rcu_read_unlock(); 1516 BUG_ON(vms->vma_count != test_count); 1517 } 1518 #endif 1519 1520 while (vma_iter_addr(vms->vmi) > vms->start) 1521 vma_iter_prev_range(vms->vmi); 1522 1523 vms->clear_ptes = true; 1524 return 0; 1525 1526 userfaultfd_error: 1527 munmap_gather_failed: 1528 end_split_failed: 1529 modify_vma_failed: 1530 reattach_vmas(mas_detach); 1531 start_split_failed: 1532 map_count_exceeded: 1533 return error; 1534 } 1535 1536 /* 1537 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct 1538 * @vms: The vma munmap struct 1539 * @vmi: The vma iterator 1540 * @vma: The first vm_area_struct to munmap 1541 * @start: The aligned start address to munmap 1542 * @end: The aligned end address to munmap 1543 * @uf: The userfaultfd list_head 1544 * @unlock: Unlock after the operation. Only unlocked on success 1545 */ 1546 static void init_vma_munmap(struct vma_munmap_struct *vms, 1547 struct vma_iterator *vmi, struct vm_area_struct *vma, 1548 unsigned long start, unsigned long end, struct list_head *uf, 1549 bool unlock) 1550 { 1551 vms->vmi = vmi; 1552 vms->vma = vma; 1553 if (vma) { 1554 vms->start = start; 1555 vms->end = end; 1556 } else { 1557 vms->start = vms->end = 0; 1558 } 1559 vms->unlock = unlock; 1560 vms->uf = uf; 1561 vms->vma_count = 0; 1562 vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0; 1563 vms->exec_vm = vms->stack_vm = vms->data_vm = 0; 1564 vms->unmap_start = FIRST_USER_ADDRESS; 1565 vms->unmap_end = USER_PGTABLES_CEILING; 1566 vms->clear_ptes = false; 1567 } 1568 1569 /* 1570 * do_vmi_align_munmap() - munmap the aligned region from @start to @end. 1571 * @vmi: The vma iterator 1572 * @vma: The starting vm_area_struct 1573 * @mm: The mm_struct 1574 * @start: The aligned start address to munmap. 1575 * @end: The aligned end address to munmap. 1576 * @uf: The userfaultfd list_head 1577 * @unlock: Set to true to drop the mmap_lock. unlocking only happens on 1578 * success. 1579 * 1580 * Return: 0 on success and drops the lock if so directed, error and leaves the 1581 * lock held otherwise. 1582 */ 1583 int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, 1584 struct mm_struct *mm, unsigned long start, unsigned long end, 1585 struct list_head *uf, bool unlock) 1586 { 1587 struct maple_tree mt_detach; 1588 MA_STATE(mas_detach, &mt_detach, 0, 0); 1589 mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK); 1590 mt_on_stack(mt_detach); 1591 struct vma_munmap_struct vms; 1592 int error; 1593 1594 init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock); 1595 error = vms_gather_munmap_vmas(&vms, &mas_detach); 1596 if (error) 1597 goto gather_failed; 1598 1599 error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL); 1600 if (error) 1601 goto clear_tree_failed; 1602 1603 /* Point of no return */ 1604 vms_complete_munmap_vmas(&vms, &mas_detach); 1605 return 0; 1606 1607 clear_tree_failed: 1608 reattach_vmas(&mas_detach); 1609 gather_failed: 1610 validate_mm(mm); 1611 return error; 1612 } 1613 1614 /* 1615 * do_vmi_munmap() - munmap a given range. 
1616 * @vmi: The vma iterator 1617 * @mm: The mm_struct 1618 * @start: The start address to munmap 1619 * @len: The length of the range to munmap 1620 * @uf: The userfaultfd list_head 1621 * @unlock: set to true if the user wants to drop the mmap_lock on success 1622 * 1623 * This function takes a @mas that is either pointing to the previous VMA or set 1624 * to MA_START and sets it up to remove the mapping(s). The @len will be 1625 * aligned. 1626 * 1627 * Return: 0 on success and drops the lock if so directed, error and leaves the 1628 * lock held otherwise. 1629 */ 1630 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, 1631 unsigned long start, size_t len, struct list_head *uf, 1632 bool unlock) 1633 { 1634 unsigned long end; 1635 struct vm_area_struct *vma; 1636 1637 if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) 1638 return -EINVAL; 1639 1640 end = start + PAGE_ALIGN(len); 1641 if (end == start) 1642 return -EINVAL; 1643 1644 /* Find the first overlapping VMA */ 1645 vma = vma_find(vmi, end); 1646 if (!vma) { 1647 if (unlock) 1648 mmap_write_unlock(mm); 1649 return 0; 1650 } 1651 1652 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); 1653 } 1654 1655 /* 1656 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd 1657 * context and anonymous VMA name within the range [start, end). 1658 * 1659 * As a result, we might be able to merge the newly modified VMA range with an 1660 * adjacent VMA with identical properties. 1661 * 1662 * If no merge is possible and the range does not span the entirety of the VMA, 1663 * we then need to split the VMA to accommodate the change. 1664 * 1665 * The function returns either the merged VMA, the original VMA if a split was 1666 * required instead, or an error if the split failed. 1667 */ 1668 static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg) 1669 { 1670 struct vm_area_struct *vma = vmg->middle; 1671 unsigned long start = vmg->start; 1672 unsigned long end = vmg->end; 1673 struct vm_area_struct *merged; 1674 1675 /* First, try to merge. */ 1676 merged = vma_merge_existing_range(vmg); 1677 if (merged) 1678 return merged; 1679 if (vmg_nomem(vmg)) 1680 return ERR_PTR(-ENOMEM); 1681 1682 /* 1683 * Split can fail for reasons other than OOM, so if the user requests 1684 * this it's probably a mistake. 1685 */ 1686 VM_WARN_ON(vmg->give_up_on_oom && 1687 (vma->vm_start != start || vma->vm_end != end)); 1688 1689 /* Split any preceding portion of the VMA. */ 1690 if (vma->vm_start < start) { 1691 int err = split_vma(vmg->vmi, vma, start, 1); 1692 1693 if (err) 1694 return ERR_PTR(err); 1695 } 1696 1697 /* Split any trailing portion of the VMA. */ 1698 if (vma->vm_end > end) { 1699 int err = split_vma(vmg->vmi, vma, end, 0); 1700 1701 if (err) 1702 return ERR_PTR(err); 1703 } 1704 1705 return vma; 1706 } 1707 1708 struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi, 1709 struct vm_area_struct *prev, struct vm_area_struct *vma, 1710 unsigned long start, unsigned long end, 1711 vma_flags_t *vma_flags_ptr) 1712 { 1713 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); 1714 const vma_flags_t vma_flags = *vma_flags_ptr; 1715 struct vm_area_struct *ret; 1716 1717 vmg.vma_flags = vma_flags; 1718 1719 ret = vma_modify(&vmg); 1720 if (IS_ERR(ret)) 1721 return ret; 1722 1723 /* 1724 * For a merge to succeed, the flags must match those 1725 * requested. However, sticky flags may have been retained, so propagate 1726 * them to the caller. 
1727 */ 1728 if (vmg.state == VMA_MERGE_SUCCESS) 1729 *vma_flags_ptr = ret->flags; 1730 return ret; 1731 } 1732 1733 struct vm_area_struct *vma_modify_name(struct vma_iterator *vmi, 1734 struct vm_area_struct *prev, struct vm_area_struct *vma, 1735 unsigned long start, unsigned long end, 1736 struct anon_vma_name *new_name) 1737 { 1738 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); 1739 1740 vmg.anon_name = new_name; 1741 1742 return vma_modify(&vmg); 1743 } 1744 1745 struct vm_area_struct *vma_modify_policy(struct vma_iterator *vmi, 1746 struct vm_area_struct *prev, struct vm_area_struct *vma, 1747 unsigned long start, unsigned long end, 1748 struct mempolicy *new_pol) 1749 { 1750 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); 1751 1752 vmg.policy = new_pol; 1753 1754 return vma_modify(&vmg); 1755 } 1756 1757 struct vm_area_struct *vma_modify_flags_uffd(struct vma_iterator *vmi, 1758 struct vm_area_struct *prev, struct vm_area_struct *vma, 1759 unsigned long start, unsigned long end, 1760 const vma_flags_t *vma_flags, struct vm_userfaultfd_ctx new_ctx, 1761 bool give_up_on_oom) 1762 { 1763 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); 1764 1765 vmg.vma_flags = *vma_flags; 1766 vmg.uffd_ctx = new_ctx; 1767 if (give_up_on_oom) 1768 vmg.give_up_on_oom = true; 1769 1770 return vma_modify(&vmg); 1771 } 1772 1773 /* 1774 * Expand vma by delta bytes, potentially merging with an immediately adjacent 1775 * VMA with identical properties. 1776 */ 1777 struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi, 1778 struct vm_area_struct *vma, 1779 unsigned long delta) 1780 { 1781 VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta); 1782 1783 vmg.next = vma_iter_next_rewind(vmi, NULL); 1784 vmg.middle = NULL; /* We use the VMA to populate VMG fields only. 
*/ 1785 1786 return vma_merge_new_range(&vmg); 1787 } 1788 1789 void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb) 1790 { 1791 vb->count = 0; 1792 } 1793 1794 static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb) 1795 { 1796 struct address_space *mapping; 1797 int i; 1798 1799 mapping = vb->vmas[0]->vm_file->f_mapping; 1800 i_mmap_lock_write(mapping); 1801 for (i = 0; i < vb->count; i++) { 1802 VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping); 1803 __remove_shared_vm_struct(vb->vmas[i], mapping); 1804 } 1805 i_mmap_unlock_write(mapping); 1806 1807 unlink_file_vma_batch_init(vb); 1808 } 1809 1810 void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb, 1811 struct vm_area_struct *vma) 1812 { 1813 if (vma->vm_file == NULL) 1814 return; 1815 1816 if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) || 1817 vb->count == ARRAY_SIZE(vb->vmas)) 1818 unlink_file_vma_batch_process(vb); 1819 1820 vb->vmas[vb->count] = vma; 1821 vb->count++; 1822 } 1823 1824 void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb) 1825 { 1826 if (vb->count > 0) 1827 unlink_file_vma_batch_process(vb); 1828 } 1829 1830 static void vma_link_file(struct vm_area_struct *vma, bool hold_rmap_lock) 1831 { 1832 struct file *file = vma->vm_file; 1833 struct address_space *mapping; 1834 1835 if (file) { 1836 mapping = file->f_mapping; 1837 i_mmap_lock_write(mapping); 1838 __vma_link_file(vma, mapping); 1839 if (!hold_rmap_lock) 1840 i_mmap_unlock_write(mapping); 1841 } 1842 } 1843 1844 static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) 1845 { 1846 VMA_ITERATOR(vmi, mm, 0); 1847 1848 vma_iter_config(&vmi, vma->vm_start, vma->vm_end); 1849 if (vma_iter_prealloc(&vmi, vma)) 1850 return -ENOMEM; 1851 1852 vma_start_write(vma); 1853 vma_iter_store_new(&vmi, vma); 1854 vma_link_file(vma, /* hold_rmap_lock= */false); 1855 mm->map_count++; 1856 validate_mm(mm); 1857 return 0; 1858 } 1859 1860 /* 1861 * Copy the vma structure to a new location in the same mm, 1862 * prior to moving page table entries, to effect an mremap move. 1863 */ 1864 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, 1865 unsigned long addr, unsigned long len, pgoff_t pgoff, 1866 bool *need_rmap_locks) 1867 { 1868 struct vm_area_struct *vma = *vmap; 1869 unsigned long vma_start = vma->vm_start; 1870 struct mm_struct *mm = vma->vm_mm; 1871 struct vm_area_struct *new_vma; 1872 bool faulted_in_anon_vma = true; 1873 VMA_ITERATOR(vmi, mm, addr); 1874 VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len); 1875 1876 /* 1877 * If anonymous vma has not yet been faulted, update new pgoff 1878 * to match new location, to increase its chance of merging. 1879 */ 1880 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { 1881 pgoff = addr >> PAGE_SHIFT; 1882 faulted_in_anon_vma = false; 1883 } 1884 1885 /* 1886 * If the VMA we are copying might contain a uprobe PTE, ensure 1887 * that we do not establish one upon merge. Otherwise, when mremap() 1888 * moves page tables, it will orphan the newly created PTE. 
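	 * So, for file-backed VMAs, set skip_vma_uprobe below; vma_complete()
	 * then skips its uprobe_mmap() calls for this merge.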
1889 */ 1890 if (vma->vm_file) 1891 vmg.skip_vma_uprobe = true; 1892 1893 new_vma = find_vma_prev(mm, addr, &vmg.prev); 1894 if (new_vma && new_vma->vm_start < addr + len) 1895 return NULL; /* should never get here */ 1896 1897 vmg.pgoff = pgoff; 1898 vmg.next = vma_iter_next_rewind(&vmi, NULL); 1899 new_vma = vma_merge_copied_range(&vmg); 1900 1901 if (new_vma) { 1902 /* 1903 * Source vma may have been merged into new_vma 1904 */ 1905 if (unlikely(vma_start >= new_vma->vm_start && 1906 vma_start < new_vma->vm_end)) { 1907 /* 1908 * The only way we can get a vma_merge with 1909 * self during an mremap is if the vma hasn't 1910 * been faulted in yet and we were allowed to 1911 * reset the dst vma->vm_pgoff to the 1912 * destination address of the mremap to allow 1913 * the merge to happen. mremap must change the 1914 * vm_pgoff linearity between src and dst vmas 1915 * (in turn preventing a vma_merge) to be 1916 * safe. It is only safe to keep the vm_pgoff 1917 * linear if there are no pages mapped yet. 1918 */ 1919 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); 1920 *vmap = vma = new_vma; 1921 } 1922 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); 1923 } else { 1924 new_vma = vm_area_dup(vma); 1925 if (!new_vma) 1926 goto out; 1927 vma_set_range(new_vma, addr, addr + len, pgoff); 1928 if (vma_dup_policy(vma, new_vma)) 1929 goto out_free_vma; 1930 if (anon_vma_clone(new_vma, vma, VMA_OP_REMAP)) 1931 goto out_free_mempol; 1932 if (new_vma->vm_file) 1933 get_file(new_vma->vm_file); 1934 if (new_vma->vm_ops && new_vma->vm_ops->open) 1935 new_vma->vm_ops->open(new_vma); 1936 if (vma_link(mm, new_vma)) 1937 goto out_vma_link; 1938 *need_rmap_locks = false; 1939 } 1940 return new_vma; 1941 1942 out_vma_link: 1943 fixup_hugetlb_reservations(new_vma); 1944 vma_close(new_vma); 1945 1946 if (new_vma->vm_file) 1947 fput(new_vma->vm_file); 1948 1949 unlink_anon_vmas(new_vma); 1950 out_free_mempol: 1951 mpol_put(vma_policy(new_vma)); 1952 out_free_vma: 1953 vm_area_free(new_vma); 1954 out: 1955 return NULL; 1956 } 1957 1958 /* 1959 * Rough compatibility check to quickly see if it's even worth looking 1960 * at sharing an anon_vma. 1961 * 1962 * They need to have the same vm_file, and the flags can only differ 1963 * in things that mprotect may change. 1964 * 1965 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that 1966 * we can merge the two vma's. For example, we refuse to merge a vma if 1967 * there is a vm_ops->close() function, because that indicates that the 1968 * driver is doing some kind of reference counting. But that doesn't 1969 * really matter for the anon_vma sharing case. 1970 */ 1971 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) 1972 { 1973 vma_flags_t diff = vma_flags_diff_pair(&a->flags, &b->flags); 1974 1975 vma_flags_clear_mask(&diff, VMA_ACCESS_FLAGS); 1976 vma_flags_clear_mask(&diff, VMA_IGNORE_MERGE_FLAGS); 1977 1978 return a->vm_end == b->vm_start && 1979 mpol_equal(vma_policy(a), vma_policy(b)) && 1980 a->vm_file == b->vm_file && 1981 vma_flags_empty(&diff) && 1982 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); 1983 } 1984 1985 /* 1986 * Do some basic sanity checking to see if we can re-use the anon_vma 1987 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be 1988 * the same as 'old', the other will be the new one that is trying 1989 * to share the anon_vma. 1990 * 1991 * NOTE! 
This runs with mmap_lock held for reading, so it is possible that 1992 * the anon_vma of 'old' is concurrently in the process of being set up 1993 * by another page fault trying to merge _that_. But that's ok: if it 1994 * is being set up, that automatically means that it will be a singleton 1995 * acceptable for merging, so we can do all of this optimistically. But 1996 * we do that READ_ONCE() to make sure that we never re-load the pointer. 1997 * 1998 * IOW: that the "list_is_singular()" test on the anon_vma_chain only 1999 * matters for the 'stable anon_vma' case (ie the thing we want to avoid 2000 * is to return an anon_vma that is "complex" due to having gone through 2001 * a fork). 2002 * 2003 * We also make sure that the two vma's are compatible (adjacent, 2004 * and with the same memory policies). That's all stable, even with just 2005 * a read lock on the mmap_lock. 2006 */ 2007 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, 2008 struct vm_area_struct *a, 2009 struct vm_area_struct *b) 2010 { 2011 if (anon_vma_compatible(a, b)) { 2012 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); 2013 2014 if (anon_vma && list_is_singular(&old->anon_vma_chain)) 2015 return anon_vma; 2016 } 2017 return NULL; 2018 } 2019 2020 /* 2021 * find_mergeable_anon_vma is used by anon_vma_prepare, to check 2022 * neighbouring vmas for a suitable anon_vma, before it goes off 2023 * to allocate a new anon_vma. It checks because a repetitive 2024 * sequence of mprotects and faults may otherwise lead to distinct 2025 * anon_vmas being allocated, preventing vma merge in subsequent 2026 * mprotect. 2027 */ 2028 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) 2029 { 2030 struct anon_vma *anon_vma = NULL; 2031 struct vm_area_struct *prev, *next; 2032 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end); 2033 2034 /* Try next first. */ 2035 next = vma_iter_load(&vmi); 2036 if (next) { 2037 anon_vma = reusable_anon_vma(next, vma, next); 2038 if (anon_vma) 2039 return anon_vma; 2040 } 2041 2042 prev = vma_prev(&vmi); 2043 VM_BUG_ON_VMA(prev != vma, vma); 2044 prev = vma_prev(&vmi); 2045 /* Try prev next. */ 2046 if (prev) 2047 anon_vma = reusable_anon_vma(prev, prev, vma); 2048 2049 /* 2050 * We might reach here with anon_vma == NULL if we can't find 2051 * any reusable anon_vma. 2052 * There's no absolute need to look only at touching neighbours: 2053 * we could search further afield for "compatible" anon_vmas. 2054 * But it would probably just be a waste of time searching, 2055 * or lead to too many vmas hanging off the same anon_vma. 2056 * We're trying to allow mprotect remerging later on, 2057 * not trying to minimize memory used for anon_vmas. 2058 */ 2059 return anon_vma; 2060 } 2061 2062 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops) 2063 { 2064 return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite); 2065 } 2066 2067 static bool vma_is_shared_writable(struct vm_area_struct *vma) 2068 { 2069 return vma_test_all(vma, VMA_WRITE_BIT, VMA_SHARED_BIT); 2070 } 2071 2072 static bool vma_fs_can_writeback(struct vm_area_struct *vma) 2073 { 2074 /* No managed pages to writeback. */ 2075 if (vma_test(vma, VMA_PFNMAP_BIT)) 2076 return false; 2077 2078 return vma->vm_file && vma->vm_file->f_mapping && 2079 mapping_can_writeback(vma->vm_file->f_mapping); 2080 } 2081 2082 /* 2083 * Does this VMA require the underlying folios to have their dirty state 2084 * tracked? 
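 *
 * For example, a MAP_SHARED, PROT_WRITE mapping of a regular file on a
 * writeback-capable filesystem needs dirty tracking, while a shared PFN-mapped
 * range whose vm_ops supply neither page_mkwrite() nor pfn_mkwrite() does not.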
2085 */ 2086 bool vma_needs_dirty_tracking(struct vm_area_struct *vma) 2087 { 2088 /* Only shared, writable VMAs require dirty tracking. */ 2089 if (!vma_is_shared_writable(vma)) 2090 return false; 2091 2092 /* Does the filesystem need to be notified? */ 2093 if (vm_ops_needs_writenotify(vma->vm_ops)) 2094 return true; 2095 2096 /* 2097 * Even if the filesystem doesn't indicate a need for writenotify, if it 2098 * can writeback, dirty tracking is still required. 2099 */ 2100 return vma_fs_can_writeback(vma); 2101 } 2102 2103 /* 2104 * Some shared mappings will want the pages marked read-only 2105 * to track write events. If so, we'll downgrade vm_page_prot 2106 * to the private version (using protection_map[] without the 2107 * VM_SHARED bit). 2108 */ 2109 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) 2110 { 2111 /* If it was private or non-writable, the write bit is already clear */ 2112 if (!vma_is_shared_writable(vma)) 2113 return false; 2114 2115 /* The backer wishes to know when pages are first written to? */ 2116 if (vm_ops_needs_writenotify(vma->vm_ops)) 2117 return true; 2118 2119 /* The open routine did something to the protections that pgprot_modify 2120 * won't preserve? */ 2121 if (pgprot_val(vm_page_prot) != 2122 pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags))) 2123 return false; 2124 2125 /* 2126 * Do we need to track softdirty? hugetlb does not support softdirty 2127 * tracking yet. 2128 */ 2129 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma)) 2130 return true; 2131 2132 /* Do we need write faults for uffd-wp tracking? */ 2133 if (userfaultfd_wp(vma)) 2134 return true; 2135 2136 /* Can the mapping track the dirty pages? */ 2137 return vma_fs_can_writeback(vma); 2138 } 2139 2140 static DEFINE_MUTEX(mm_all_locks_mutex); 2141 2142 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) 2143 { 2144 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { 2145 /* 2146 * The LSB of head.next can't change from under us 2147 * because we hold the mm_all_locks_mutex. 2148 */ 2149 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); 2150 /* 2151 * We can safely modify head.next after taking the 2152 * anon_vma->root->rwsem. If some other vma in this mm shares 2153 * the same anon_vma we won't take it again. 2154 * 2155 * No need of atomic instructions here, head.next 2156 * can't change from under us thanks to the 2157 * anon_vma->root->rwsem. 2158 */ 2159 if (__test_and_set_bit(0, (unsigned long *) 2160 &anon_vma->root->rb_root.rb_root.rb_node)) 2161 BUG(); 2162 } 2163 } 2164 2165 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) 2166 { 2167 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { 2168 /* 2169 * AS_MM_ALL_LOCKS can't change from under us because 2170 * we hold the mm_all_locks_mutex. 2171 * 2172 * Operations on ->flags have to be atomic because 2173 * even if AS_MM_ALL_LOCKS is stable thanks to the 2174 * mm_all_locks_mutex, there may be other cpus 2175 * changing other bitflags in parallel to us. 2176 */ 2177 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) 2178 BUG(); 2179 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); 2180 } 2181 } 2182 2183 /* 2184 * This operation locks against the VM for all pte/vma/mm related 2185 * operations that could ever happen on a certain mm. This includes 2186 * vmtruncate, try_to_unmap, and all page faults. 
2187 * 2188 * The caller must take the mmap_lock in write mode before calling 2189 * mm_take_all_locks(). The caller isn't allowed to release the 2190 * mmap_lock until mm_drop_all_locks() returns. 2191 * 2192 * mmap_lock in write mode is required in order to block all operations 2193 * that could modify pagetables and free pages without the need to 2194 * alter the vma layout. It's also needed in write mode to prevent new 2195 * anon_vmas from being associated with existing vmas. 2196 * 2197 * A single task can't take more than one mm_take_all_locks() in a row 2198 * or it would deadlock. 2199 * 2200 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in 2201 * mapping->flags avoid taking the same lock twice, if more than one 2202 * vma in this mm is backed by the same anon_vma or address_space. 2203 * 2204 * We take locks in the following order, according to the comment at the beginning 2205 * of mm/rmap.c: 2206 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for 2207 * hugetlb mapping); 2208 * - all vmas marked locked 2209 * - all i_mmap_rwsem locks; 2210 * - all anon_vma->rwsem locks 2211 * 2212 * We can take all locks within these types randomly because the VM code 2213 * doesn't nest them and we're protected from parallel mm_take_all_locks() by 2214 * mm_all_locks_mutex. 2215 * 2216 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations 2217 * that may have to take thousands of locks. 2218 * 2219 * mm_take_all_locks() can fail if it's interrupted by signals. 2220 */ 2221 int mm_take_all_locks(struct mm_struct *mm) 2222 { 2223 struct vm_area_struct *vma; 2224 struct anon_vma_chain *avc; 2225 VMA_ITERATOR(vmi, mm, 0); 2226 2227 mmap_assert_write_locked(mm); 2228 2229 mutex_lock(&mm_all_locks_mutex); 2230 2231 /* 2232 * vma_start_write() does not have a complement in mm_drop_all_locks() 2233 * because vma_start_write() is always asymmetrical; it marks a VMA as 2234 * being written to until mmap_write_unlock() or mmap_write_downgrade() 2235 * is reached. 2236 */ 2237 for_each_vma(vmi, vma) { 2238 if (signal_pending(current)) 2239 goto out_unlock; 2240 vma_start_write(vma); 2241 } 2242 2243 vma_iter_init(&vmi, mm, 0); 2244 for_each_vma(vmi, vma) { 2245 if (signal_pending(current)) 2246 goto out_unlock; 2247 if (vma->vm_file && vma->vm_file->f_mapping && 2248 is_vm_hugetlb_page(vma)) 2249 vm_lock_mapping(mm, vma->vm_file->f_mapping); 2250 } 2251 2252 vma_iter_init(&vmi, mm, 0); 2253 for_each_vma(vmi, vma) { 2254 if (signal_pending(current)) 2255 goto out_unlock; 2256 if (vma->vm_file && vma->vm_file->f_mapping && 2257 !is_vm_hugetlb_page(vma)) 2258 vm_lock_mapping(mm, vma->vm_file->f_mapping); 2259 } 2260 2261 vma_iter_init(&vmi, mm, 0); 2262 for_each_vma(vmi, vma) { 2263 if (signal_pending(current)) 2264 goto out_unlock; 2265 if (vma->anon_vma) 2266 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 2267 vm_lock_anon_vma(mm, avc->anon_vma); 2268 } 2269 2270 return 0; 2271 2272 out_unlock: 2273 mm_drop_all_locks(mm); 2274 return -EINTR; 2275 } 2276 2277 static void vm_unlock_anon_vma(struct anon_vma *anon_vma) 2278 { 2279 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { 2280 /* 2281 * The LSB of head.next can't change to 0 from under 2282 * us because we hold the mm_all_locks_mutex. 2283 * 2284 * We must however clear the bitflag before unlocking 2285 * the vma so the users using the anon_vma->rb_root will 2286 * never see our bitflag. 
2287 * 2288 * No need of atomic instructions here, head.next 2289 * can't change from under us until we release the 2290 * anon_vma->root->rwsem. 2291 */ 2292 if (!__test_and_clear_bit(0, (unsigned long *) 2293 &anon_vma->root->rb_root.rb_root.rb_node)) 2294 BUG(); 2295 anon_vma_unlock_write(anon_vma); 2296 } 2297 } 2298 2299 static void vm_unlock_mapping(struct address_space *mapping) 2300 { 2301 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { 2302 /* 2303 * AS_MM_ALL_LOCKS can't change to 0 from under us 2304 * because we hold the mm_all_locks_mutex. 2305 */ 2306 i_mmap_unlock_write(mapping); 2307 if (!test_and_clear_bit(AS_MM_ALL_LOCKS, 2308 &mapping->flags)) 2309 BUG(); 2310 } 2311 } 2312 2313 /* 2314 * The mmap_lock cannot be released by the caller until 2315 * mm_drop_all_locks() returns. 2316 */ 2317 void mm_drop_all_locks(struct mm_struct *mm) 2318 { 2319 struct vm_area_struct *vma; 2320 struct anon_vma_chain *avc; 2321 VMA_ITERATOR(vmi, mm, 0); 2322 2323 mmap_assert_write_locked(mm); 2324 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex)); 2325 2326 for_each_vma(vmi, vma) { 2327 if (vma->anon_vma) 2328 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 2329 vm_unlock_anon_vma(avc->anon_vma); 2330 if (vma->vm_file && vma->vm_file->f_mapping) 2331 vm_unlock_mapping(vma->vm_file->f_mapping); 2332 } 2333 2334 mutex_unlock(&mm_all_locks_mutex); 2335 } 2336 2337 /* 2338 * We account for memory if it's a private writeable mapping, 2339 * not hugepages and VM_NORESERVE wasn't set. 2340 */ 2341 static bool accountable_mapping(struct mmap_state *map) 2342 { 2343 const struct file *file = map->file; 2344 2345 /* 2346 * hugetlb has its own accounting separate from the core VM 2347 * VM_HUGETLB may not be set yet so we cannot check for that flag. 2348 */ 2349 if (file && is_file_hugepages(file)) 2350 return false; 2351 2352 return vma_flags_test(&map->vma_flags, VMA_WRITE_BIT) && 2353 !vma_flags_test_any(&map->vma_flags, VMA_NORESERVE_BIT, 2354 VMA_SHARED_BIT); 2355 } 2356 2357 /* 2358 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap() 2359 * operation. 2360 * @vms: The vma unmap structure 2361 * @mas_detach: The maple state with the detached maple tree 2362 * 2363 * Reattach any detached vmas, free up the maple tree used to track the vmas. 2364 * If that's not possible because the ptes are cleared (and vm_ops->closed() may 2365 * have been called), then a NULL is written over the vmas and the vmas are 2366 * removed (munmap() completed). 2367 */ 2368 static void vms_abort_munmap_vmas(struct vma_munmap_struct *vms, 2369 struct ma_state *mas_detach) 2370 { 2371 struct ma_state *mas = &vms->vmi->mas; 2372 2373 if (!vms->nr_pages) 2374 return; 2375 2376 if (vms->clear_ptes) 2377 return reattach_vmas(mas_detach); 2378 2379 /* 2380 * Aborting cannot just call the vm_ops open() because they are often 2381 * not symmetrical and state data has been lost. Resort to the old 2382 * failure method of leaving a gap where the MAP_FIXED mapping failed. 
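 *
 * The NULL store below is what creates that gap: it overwrites the whole
 * [start, end) range in the maple tree before the unmap is completed.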
2383 */ 2384 mas_set_range(mas, vms->start, vms->end - 1); 2385 mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL); 2386 /* Clean up the insertion of the unfortunate gap */ 2387 vms_complete_munmap_vmas(vms, mas_detach); 2388 } 2389 2390 static void update_ksm_flags(struct mmap_state *map) 2391 { 2392 map->vma_flags = ksm_vma_flags(map->mm, map->file, map->vma_flags); 2393 } 2394 2395 static void set_desc_from_map(struct vm_area_desc *desc, 2396 const struct mmap_state *map) 2397 { 2398 desc->start = map->addr; 2399 desc->end = map->end; 2400 2401 desc->pgoff = map->pgoff; 2402 desc->vm_file = map->file; 2403 desc->vma_flags = map->vma_flags; 2404 desc->page_prot = map->page_prot; 2405 } 2406 2407 /* 2408 * __mmap_setup() - Prepare to gather any overlapping VMAs that need to be 2409 * unmapped once the map operation is completed, check limits, account mapping 2410 * and clean up any pre-existing VMAs. 2411 * 2412 * As a result it sets up the @map and @desc objects. 2413 * 2414 * @map: Mapping state. 2415 * @desc: VMA descriptor 2416 * @uf: Userfaultfd context list. 2417 * 2418 * Returns: 0 on success, error code otherwise. 2419 */ 2420 static int __mmap_setup(struct mmap_state *map, struct vm_area_desc *desc, 2421 struct list_head *uf) 2422 { 2423 int error; 2424 struct vma_iterator *vmi = map->vmi; 2425 struct vma_munmap_struct *vms = &map->vms; 2426 2427 /* Find the first overlapping VMA and initialise unmap state. */ 2428 vms->vma = vma_find(vmi, map->end); 2429 init_vma_munmap(vms, vmi, vms->vma, map->addr, map->end, uf, 2430 /* unlock = */ false); 2431 2432 /* OK, we have overlapping VMAs - prepare to unmap them. */ 2433 if (vms->vma) { 2434 mt_init_flags(&map->mt_detach, 2435 vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK); 2436 mt_on_stack(map->mt_detach); 2437 mas_init(&map->mas_detach, &map->mt_detach, /* addr = */ 0); 2438 /* Prepare to unmap any existing mapping in the area */ 2439 error = vms_gather_munmap_vmas(vms, &map->mas_detach); 2440 if (error) { 2441 /* On error VMAs will already have been reattached. */ 2442 vms->nr_pages = 0; 2443 return error; 2444 } 2445 2446 map->next = vms->next; 2447 map->prev = vms->prev; 2448 } else { 2449 map->next = vma_iter_next_rewind(vmi, &map->prev); 2450 } 2451 2452 /* Check against address space limit. */ 2453 if (!may_expand_vm(map->mm, &map->vma_flags, map->pglen - vms->nr_pages)) 2454 return -ENOMEM; 2455 2456 /* Private writable mapping: check memory availability. */ 2457 if (accountable_mapping(map)) { 2458 map->charged = map->pglen; 2459 map->charged -= vms->nr_accounted; 2460 if (map->charged) { 2461 error = security_vm_enough_memory_mm(map->mm, map->charged); 2462 if (error) 2463 return error; 2464 } 2465 2466 vms->nr_accounted = 0; 2467 vma_flags_set(&map->vma_flags, VMA_ACCOUNT_BIT); 2468 } 2469 2470 /* 2471 * Clear PTEs while the vma is still in the tree so that rmap 2472 * cannot race with the freeing later in the truncate scenario. 2473 * This is also needed for mmap_file(), which is why vm_ops 2474 * close function is called. 
2475 */ 2476 vms_clean_up_area(vms, &map->mas_detach); 2477 2478 set_desc_from_map(desc, map); 2479 return 0; 2480 } 2481 2482 2483 static int __mmap_new_file_vma(struct mmap_state *map, 2484 struct vm_area_struct *vma) 2485 { 2486 struct vma_iterator *vmi = map->vmi; 2487 int error; 2488 2489 vma->vm_file = map->file; 2490 if (!map->file_doesnt_need_get) 2491 get_file(map->file); 2492 2493 if (!map->file->f_op->mmap) 2494 return 0; 2495 2496 error = mmap_file(vma->vm_file, vma); 2497 if (error) { 2498 UNMAP_STATE(unmap, vmi, vma, vma->vm_start, vma->vm_end, 2499 map->prev, map->next); 2500 fput(vma->vm_file); 2501 vma->vm_file = NULL; 2502 2503 vma_iter_set(vmi, vma->vm_end); 2504 /* Undo any partial mapping done by a device driver. */ 2505 unmap_region(&unmap); 2506 return error; 2507 } 2508 2509 /* Drivers cannot alter the address of the VMA. */ 2510 WARN_ON_ONCE(map->addr != vma->vm_start); 2511 /* 2512 * Drivers should not permit writability when previously it was 2513 * disallowed. 2514 */ 2515 VM_WARN_ON_ONCE(!vma_flags_same_pair(&map->vma_flags, &vma->flags) && 2516 !vma_flags_test(&map->vma_flags, VMA_MAYWRITE_BIT) && 2517 vma_test(vma, VMA_MAYWRITE_BIT)); 2518 2519 map->file = vma->vm_file; 2520 map->vma_flags = vma->flags; 2521 2522 return 0; 2523 } 2524 2525 /* 2526 * __mmap_new_vma() - Allocate a new VMA for the region, as merging was not 2527 * possible. 2528 * 2529 * @map: Mapping state. 2530 * @vmap: Output pointer for the new VMA. 2531 * @action: Any mmap_prepare action that is still to complete. 2532 * 2533 * Returns: Zero on success, or an error. 2534 */ 2535 static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap, 2536 struct mmap_action *action) 2537 { 2538 struct vma_iterator *vmi = map->vmi; 2539 int error = 0; 2540 struct vm_area_struct *vma; 2541 2542 /* 2543 * Determine the object being mapped and call the appropriate 2544 * specific mapper. the address has already been validated, but 2545 * not unmapped, but the maps are removed from the list. 2546 */ 2547 vma = vm_area_alloc(map->mm); 2548 if (!vma) 2549 return -ENOMEM; 2550 2551 vma_iter_config(vmi, map->addr, map->end); 2552 vma_set_range(vma, map->addr, map->end, map->pgoff); 2553 vma->flags = map->vma_flags; 2554 vma->vm_page_prot = map->page_prot; 2555 2556 if (vma_iter_prealloc(vmi, vma)) { 2557 error = -ENOMEM; 2558 goto free_vma; 2559 } 2560 2561 if (map->file) 2562 error = __mmap_new_file_vma(map, vma); 2563 else if (vma_flags_test(&map->vma_flags, VMA_SHARED_BIT)) 2564 error = shmem_zero_setup(vma); 2565 else 2566 vma_set_anonymous(vma); 2567 2568 if (error) 2569 goto free_iter_vma; 2570 2571 if (!map->check_ksm_early) { 2572 update_ksm_flags(map); 2573 vma->flags = map->vma_flags; 2574 } 2575 2576 #ifdef CONFIG_SPARC64 2577 /* TODO: Fix SPARC ADI! */ 2578 WARN_ON_ONCE(!arch_validate_flags(map->vm_flags)); 2579 #endif 2580 2581 /* Lock the VMA since it is modified after insertion into VMA tree */ 2582 vma_start_write(vma); 2583 vma_iter_store_new(vmi, vma); 2584 map->mm->map_count++; 2585 vma_link_file(vma, action->hide_from_rmap_until_complete); 2586 2587 /* 2588 * vma_merge_new_range() calls khugepaged_enter_vma() too, the below 2589 * call covers the non-merge case. 
2590 */ 2591 if (!vma_is_anonymous(vma)) 2592 khugepaged_enter_vma(vma, map->vm_flags); 2593 *vmap = vma; 2594 return 0; 2595 2596 free_iter_vma: 2597 vma_iter_free(vmi); 2598 free_vma: 2599 vm_area_free(vma); 2600 return error; 2601 } 2602 2603 /* 2604 * __mmap_complete() - Unmap any VMAs we overlap, account memory mapping 2605 * statistics, handle locking and finalise the VMA. 2606 * 2607 * @map: Mapping state. 2608 * @vma: Merged or newly allocated VMA for the mmap()'d region. 2609 */ 2610 static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma) 2611 { 2612 struct mm_struct *mm = map->mm; 2613 2614 perf_event_mmap(vma); 2615 2616 /* Unmap any existing mapping in the area. */ 2617 vms_complete_munmap_vmas(&map->vms, &map->mas_detach); 2618 2619 vm_stat_account(mm, vma->vm_flags, map->pglen); 2620 if (vma_test(vma, VMA_LOCKED_BIT)) { 2621 if (!vma_supports_mlock(vma)) 2622 vma_clear_flags_mask(vma, VMA_LOCKED_MASK); 2623 else 2624 mm->locked_vm += map->pglen; 2625 } 2626 2627 if (vma->vm_file) 2628 uprobe_mmap(vma); 2629 2630 /* 2631 * New (or expanded) vma always get soft dirty status. 2632 * Otherwise user-space soft-dirty page tracker won't 2633 * be able to distinguish situation when vma area unmapped, 2634 * then new mapped in-place (which must be aimed as 2635 * a completely new data area). 2636 */ 2637 if (pgtable_supports_soft_dirty()) 2638 vma_set_flags(vma, VMA_SOFTDIRTY_BIT); 2639 2640 vma_set_page_prot(vma); 2641 } 2642 2643 static int call_action_prepare(struct mmap_state *map, 2644 struct vm_area_desc *desc) 2645 { 2646 int err; 2647 2648 err = mmap_action_prepare(desc); 2649 if (err) 2650 return err; 2651 2652 return 0; 2653 } 2654 2655 /* 2656 * Invoke the f_op->mmap_prepare() callback for a file-backed mapping that 2657 * specifies it. 2658 * 2659 * This is called prior to any merge attempt, and updates whitelisted fields 2660 * that are permitted to be updated by the caller. 2661 * 2662 * All but user-defined fields will be pre-populated with original values. 2663 * 2664 * Returns 0 on success, or an error code otherwise. 2665 */ 2666 static int call_mmap_prepare(struct mmap_state *map, 2667 struct vm_area_desc *desc) 2668 { 2669 int err; 2670 2671 /* Invoke the hook. */ 2672 err = vfs_mmap_prepare(map->file, desc); 2673 if (err) 2674 return err; 2675 2676 err = call_action_prepare(map, desc); 2677 if (err) 2678 return err; 2679 2680 /* Update fields permitted to be changed. */ 2681 map->pgoff = desc->pgoff; 2682 if (desc->vm_file != map->file) { 2683 map->file_doesnt_need_get = true; 2684 map->file = desc->vm_file; 2685 } 2686 map->vma_flags = desc->vma_flags; 2687 map->page_prot = desc->page_prot; 2688 /* User-defined fields. */ 2689 map->vm_ops = desc->vm_ops; 2690 map->vm_private_data = desc->private_data; 2691 2692 return 0; 2693 } 2694 2695 static void set_vma_user_defined_fields(struct vm_area_struct *vma, 2696 struct mmap_state *map) 2697 { 2698 if (map->vm_ops) 2699 vma->vm_ops = map->vm_ops; 2700 vma->vm_private_data = map->vm_private_data; 2701 } 2702 2703 /* 2704 * Are we guaranteed no driver can change state such as to preclude KSM merging? 2705 * If so, let's set the KSM mergeable flag early so we don't break VMA merging. 2706 */ 2707 static bool can_set_ksm_flags_early(struct mmap_state *map) 2708 { 2709 struct file *file = map->file; 2710 2711 /* Anonymous mappings have no driver which can change them. 
*/ 2712 if (!file) 2713 return true; 2714 2715 /* 2716 * If .mmap_prepare() is specified, then the driver will have already 2717 * manipulated state prior to updating KSM flags. So no need to worry 2718 * about mmap callbacks modifying VMA flags after the KSM flag has been 2719 * updated here, which could otherwise affect KSM eligibility. 2720 */ 2721 if (file->f_op->mmap_prepare) 2722 return true; 2723 2724 /* shmem is safe. */ 2725 if (shmem_file(file)) 2726 return true; 2727 2728 /* Any other .mmap callback is not safe. */ 2729 return false; 2730 } 2731 2732 static unsigned long __mmap_region(struct file *file, unsigned long addr, 2733 unsigned long len, vma_flags_t vma_flags, 2734 unsigned long pgoff, struct list_head *uf) 2735 { 2736 struct mm_struct *mm = current->mm; 2737 struct vm_area_struct *vma = NULL; 2738 bool have_mmap_prepare = file && file->f_op->mmap_prepare; 2739 VMA_ITERATOR(vmi, mm, addr); 2740 MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vma_flags, file); 2741 struct vm_area_desc desc = { 2742 .mm = mm, 2743 .file = file, 2744 .action = { 2745 .type = MMAP_NOTHING, /* Default to no further action. */ 2746 }, 2747 }; 2748 bool allocated_new = false; 2749 int error; 2750 2751 map.check_ksm_early = can_set_ksm_flags_early(&map); 2752 2753 error = __mmap_setup(&map, &desc, uf); 2754 if (!error && have_mmap_prepare) 2755 error = call_mmap_prepare(&map, &desc); 2756 if (error) 2757 goto abort_munmap; 2758 2759 if (map.check_ksm_early) 2760 update_ksm_flags(&map); 2761 2762 /* Attempt to merge with adjacent VMAs... */ 2763 if (map.prev || map.next) { 2764 VMG_MMAP_STATE(vmg, &map, /* vma = */ NULL); 2765 2766 vma = vma_merge_new_range(&vmg); 2767 } 2768 2769 /* ...but if we can't, allocate a new VMA. */ 2770 if (!vma) { 2771 error = __mmap_new_vma(&map, &vma, &desc.action); 2772 if (error) 2773 goto unacct_error; 2774 allocated_new = true; 2775 } 2776 2777 if (have_mmap_prepare) 2778 set_vma_user_defined_fields(vma, &map); 2779 2780 __mmap_complete(&map, vma); 2781 2782 if (have_mmap_prepare && allocated_new) { 2783 error = mmap_action_complete(vma, &desc.action); 2784 if (error) 2785 return error; 2786 } 2787 2788 return addr; 2789 2790 /* Accounting was done by __mmap_setup(). */ 2791 unacct_error: 2792 if (map.charged) 2793 vm_unacct_memory(map.charged); 2794 abort_munmap: 2795 vms_abort_munmap_vmas(&map.vms, &map.mas_detach); 2796 return error; 2797 } 2798 2799 /** 2800 * mmap_region() - Actually perform the userland mapping of a VMA into 2801 * current->mm with known, aligned and overflow-checked @addr and @len, and 2802 * correctly determined VMA flags @vm_flags and page offset @pgoff. 2803 * 2804 * This is an internal memory management function, and should not be used 2805 * directly. 2806 * 2807 * The caller must write-lock current->mm->mmap_lock. 2808 * 2809 * @file: If a file-backed mapping, a pointer to the struct file describing the 2810 * file to be mapped, otherwise NULL. 2811 * @addr: The page-aligned address at which to perform the mapping. 2812 * @len: The page-aligned, non-zero, length of the mapping. 2813 * @vm_flags: The VMA flags which should be applied to the mapping. 2814 * @pgoff: If @file is specified, the page offset into the file, if not then 2815 * the virtual page offset in memory of the anonymous mapping. 2816 * @uf: Optionally, a pointer to a list head used for tracking userfaultfd unmap 2817 * events. 2818 * 2819 * Returns: Either an error, or the address at which the requested mapping has 2820 * been performed. 
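 *
 * Illustrative call sequence (a sketch of a do_mmap()-style caller, not a
 * verbatim call site from this file):
 *
 *	LIST_HEAD(uf);
 *
 *	if (mmap_write_lock_killable(current->mm))
 *		return -EINTR;
 *	addr = mmap_region(file, addr, len, vm_flags, pgoff, &uf);
 *	mmap_write_unlock(current->mm);
 *	userfaultfd_unmap_complete(current->mm, &uf);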
2821 */ 2822 unsigned long mmap_region(struct file *file, unsigned long addr, 2823 unsigned long len, vm_flags_t vm_flags, 2824 unsigned long pgoff, struct list_head *uf) 2825 { 2826 unsigned long ret; 2827 bool writable_file_mapping = false; 2828 const vma_flags_t vma_flags = legacy_to_vma_flags(vm_flags); 2829 2830 mmap_assert_write_locked(current->mm); 2831 2832 /* Check to see if MDWE is applicable. */ 2833 if (map_deny_write_exec(&vma_flags, &vma_flags)) 2834 return -EACCES; 2835 2836 /* Allow architectures to sanity-check the vm_flags. */ 2837 if (!arch_validate_flags(vm_flags)) 2838 return -EINVAL; 2839 2840 /* Map writable and ensure this isn't a sealed memfd. */ 2841 if (file && is_shared_maywrite(&vma_flags)) { 2842 int error = mapping_map_writable(file->f_mapping); 2843 2844 if (error) 2845 return error; 2846 writable_file_mapping = true; 2847 } 2848 2849 ret = __mmap_region(file, addr, len, vma_flags, pgoff, uf); 2850 2851 /* Clear our write mapping regardless of error. */ 2852 if (writable_file_mapping) 2853 mapping_unmap_writable(file->f_mapping); 2854 2855 validate_mm(current->mm); 2856 return ret; 2857 } 2858 2859 /** 2860 * do_brk_flags() - Increase the brk vma if the flags match. 2861 * @vmi: The vma iterator 2862 * @addr: The start address 2863 * @len: The length of the increase 2864 * @vma: The vma, 2865 * @vma_flags: The VMA Flags 2866 * 2867 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags 2868 * do not match then create a new anonymous VMA. Eventually we may be able to 2869 * do some brk-specific accounting here. 2870 * 2871 * Returns: %0 on success, or otherwise an error. 2872 */ 2873 int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, 2874 unsigned long addr, unsigned long len, vma_flags_t vma_flags) 2875 { 2876 struct mm_struct *mm = current->mm; 2877 2878 /* 2879 * Check against address space limits by the changed size 2880 * Note: This happens *after* clearing old mappings in some code paths. 2881 */ 2882 vma_flags_set_mask(&vma_flags, VMA_DATA_DEFAULT_FLAGS); 2883 vma_flags_set(&vma_flags, VMA_ACCOUNT_BIT); 2884 vma_flags_set_mask(&vma_flags, mm->def_vma_flags); 2885 2886 vma_flags = ksm_vma_flags(mm, NULL, vma_flags); 2887 if (!may_expand_vm(mm, &vma_flags, len >> PAGE_SHIFT)) 2888 return -ENOMEM; 2889 2890 if (mm->map_count > get_sysctl_max_map_count()) 2891 return -ENOMEM; 2892 2893 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) 2894 return -ENOMEM; 2895 2896 /* 2897 * Expand the existing vma if possible; Note that singular lists do not 2898 * occur after forking, so the expand will only happen on new VMAs. 2899 */ 2900 if (vma && vma->vm_end == addr) { 2901 VMG_STATE(vmg, mm, vmi, addr, addr + len, vma_flags, PHYS_PFN(addr)); 2902 2903 vmg.prev = vma; 2904 /* vmi is positioned at prev, which this mode expects. 
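 * A NULL return from vma_merge_new_range() below either means no merge was
 * possible, in which case a fresh anonymous VMA is allocated further down, or
 * that an allocation failed, which the vmg_nomem() check distinguishes.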
*/ 2905 vmg.just_expand = true; 2906 2907 if (vma_merge_new_range(&vmg)) 2908 goto out; 2909 else if (vmg_nomem(&vmg)) 2910 goto unacct_fail; 2911 } 2912 2913 if (vma) 2914 vma_iter_next_range(vmi); 2915 /* create a vma struct for an anonymous mapping */ 2916 vma = vm_area_alloc(mm); 2917 if (!vma) 2918 goto unacct_fail; 2919 2920 vma_set_anonymous(vma); 2921 vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT); 2922 vma->flags = vma_flags; 2923 vma->vm_page_prot = vm_get_page_prot(vma_flags_to_legacy(vma_flags)); 2924 vma_start_write(vma); 2925 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) 2926 goto mas_store_fail; 2927 2928 mm->map_count++; 2929 validate_mm(mm); 2930 out: 2931 perf_event_mmap(vma); 2932 mm->total_vm += len >> PAGE_SHIFT; 2933 mm->data_vm += len >> PAGE_SHIFT; 2934 if (vma_flags_test(&vma_flags, VMA_LOCKED_BIT)) 2935 mm->locked_vm += (len >> PAGE_SHIFT); 2936 if (pgtable_supports_soft_dirty()) 2937 vma_set_flags(vma, VMA_SOFTDIRTY_BIT); 2938 return 0; 2939 2940 mas_store_fail: 2941 vm_area_free(vma); 2942 unacct_fail: 2943 vm_unacct_memory(len >> PAGE_SHIFT); 2944 return -ENOMEM; 2945 } 2946 2947 /** 2948 * unmapped_area() - Find an area between the low_limit and the high_limit with 2949 * the correct alignment and offset, all from @info. Note: current->mm is used 2950 * for the search. 2951 * 2952 * @info: The unmapped area information including the range [low_limit - 2953 * high_limit), the alignment offset and mask. 2954 * 2955 * Return: A memory address or -ENOMEM. 2956 */ 2957 unsigned long unmapped_area(struct vm_unmapped_area_info *info) 2958 { 2959 unsigned long length, gap; 2960 unsigned long low_limit, high_limit; 2961 struct vm_area_struct *tmp; 2962 VMA_ITERATOR(vmi, current->mm, 0); 2963 2964 /* Adjust search length to account for worst case alignment overhead */ 2965 length = info->length + info->align_mask + info->start_gap; 2966 if (length < info->length) 2967 return -ENOMEM; 2968 2969 low_limit = info->low_limit; 2970 if (low_limit < mmap_min_addr) 2971 low_limit = mmap_min_addr; 2972 high_limit = info->high_limit; 2973 retry: 2974 if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length)) 2975 return -ENOMEM; 2976 2977 /* 2978 * Adjust for the gap first so it doesn't interfere with the later 2979 * alignment. The first step is the minimum needed to fulfill the start 2980 * gap, the next step is the minimum to align that. It is the minimum 2981 * needed to fulfill both. 2982 */ 2983 gap = vma_iter_addr(&vmi) + info->start_gap; 2984 gap += (info->align_offset - gap) & info->align_mask; 2985 tmp = vma_next(&vmi); 2986 /* Avoid prev check if possible */ 2987 if (tmp && vma_test_any_mask(tmp, VMA_STARTGAP_FLAGS)) { 2988 if (vm_start_gap(tmp) < gap + length - 1) { 2989 low_limit = tmp->vm_end; 2990 vma_iter_reset(&vmi); 2991 goto retry; 2992 } 2993 } else { 2994 tmp = vma_prev(&vmi); 2995 if (tmp && vm_end_gap(tmp) > gap) { 2996 low_limit = vm_end_gap(tmp); 2997 vma_iter_reset(&vmi); 2998 goto retry; 2999 } 3000 } 3001 3002 return gap; 3003 } 3004 3005 /** 3006 * unmapped_area_topdown() - Find an area between the low_limit and the 3007 * high_limit with the correct alignment and offset at the highest available 3008 * address, all from @info. Note: current->mm is used for the search. 3009 * 3010 * @info: The unmapped area information including the range [low_limit - 3011 * high_limit), the alignment offset and mask. 3012 * 3013 * Return: A memory address or -ENOMEM. 
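 *
 * For example (an illustrative sketch, not an existing caller), a top-down
 * search for a 2MiB-aligned region could be set up as:
 *
 *	struct vm_unmapped_area_info info = {
 *		.length		= len,
 *		.low_limit	= PAGE_SIZE,
 *		.high_limit	= mm->mmap_base,
 *		.align_mask	= SZ_2M - 1,
 *	};
 *
 *	addr = unmapped_area_topdown(&info);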
3014 */ 3015 unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) 3016 { 3017 unsigned long length, gap, gap_end; 3018 unsigned long low_limit, high_limit; 3019 struct vm_area_struct *tmp; 3020 VMA_ITERATOR(vmi, current->mm, 0); 3021 3022 /* Adjust search length to account for worst case alignment overhead */ 3023 length = info->length + info->align_mask + info->start_gap; 3024 if (length < info->length) 3025 return -ENOMEM; 3026 3027 low_limit = info->low_limit; 3028 if (low_limit < mmap_min_addr) 3029 low_limit = mmap_min_addr; 3030 high_limit = info->high_limit; 3031 retry: 3032 if (vma_iter_area_highest(&vmi, low_limit, high_limit, length)) 3033 return -ENOMEM; 3034 3035 gap = vma_iter_end(&vmi) - info->length; 3036 gap -= (gap - info->align_offset) & info->align_mask; 3037 gap_end = vma_iter_end(&vmi); 3038 tmp = vma_next(&vmi); 3039 /* Avoid prev check if possible */ 3040 if (tmp && vma_test_any_mask(tmp, VMA_STARTGAP_FLAGS)) { 3041 if (vm_start_gap(tmp) < gap_end) { 3042 high_limit = vm_start_gap(tmp); 3043 vma_iter_reset(&vmi); 3044 goto retry; 3045 } 3046 } else { 3047 tmp = vma_prev(&vmi); 3048 if (tmp && vm_end_gap(tmp) > gap) { 3049 high_limit = tmp->vm_start; 3050 vma_iter_reset(&vmi); 3051 goto retry; 3052 } 3053 } 3054 3055 return gap; 3056 } 3057 3058 /* 3059 * Verify that the stack growth is acceptable and 3060 * update accounting. This is shared with both the 3061 * grow-up and grow-down cases. 3062 */ 3063 static int acct_stack_growth(struct vm_area_struct *vma, 3064 unsigned long size, unsigned long grow) 3065 { 3066 struct mm_struct *mm = vma->vm_mm; 3067 unsigned long new_start; 3068 3069 /* address space limit tests */ 3070 if (!may_expand_vm(mm, &vma->flags, grow)) 3071 return -ENOMEM; 3072 3073 /* Stack limit test */ 3074 if (size > rlimit(RLIMIT_STACK)) 3075 return -ENOMEM; 3076 3077 /* mlock limit tests */ 3078 if (!mlock_future_ok(mm, vma_test(vma, VMA_LOCKED_BIT), 3079 grow << PAGE_SHIFT)) 3080 return -ENOMEM; 3081 3082 /* Check to ensure the stack will not grow into a hugetlb-only region */ 3083 new_start = vma->vm_end - size; 3084 #ifdef CONFIG_STACK_GROWSUP 3085 if (vma_test(vma, VMA_GROWSUP_BIT)) 3086 new_start = vma->vm_start; 3087 #endif 3088 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) 3089 return -EFAULT; 3090 3091 /* 3092 * Overcommit.. This must be the final test, as it will 3093 * update security statistics. 3094 */ 3095 if (security_vm_enough_memory_mm(mm, grow)) 3096 return -ENOMEM; 3097 3098 return 0; 3099 } 3100 3101 #ifdef CONFIG_STACK_GROWSUP 3102 /* 3103 * PA-RISC uses this for its stack. 3104 * vma is the last one with address > vma->vm_end. Have to extend vma. 3105 */ 3106 int expand_upwards(struct vm_area_struct *vma, unsigned long address) 3107 { 3108 struct mm_struct *mm = vma->vm_mm; 3109 struct vm_area_struct *next; 3110 unsigned long gap_addr; 3111 int error = 0; 3112 VMA_ITERATOR(vmi, mm, vma->vm_start); 3113 3114 if (!vma_test(vma, VMA_GROWSUP_BIT)) 3115 return -EFAULT; 3116 3117 mmap_assert_write_locked(mm); 3118 3119 /* Guard against exceeding limits of the address space. 
*/ 3120 address &= PAGE_MASK; 3121 if (address >= (TASK_SIZE & PAGE_MASK)) 3122 return -ENOMEM; 3123 address += PAGE_SIZE; 3124 3125 /* Enforce stack_guard_gap */ 3126 gap_addr = address + stack_guard_gap; 3127 3128 /* Guard against overflow */ 3129 if (gap_addr < address || gap_addr > TASK_SIZE) 3130 gap_addr = TASK_SIZE; 3131 3132 next = find_vma_intersection(mm, vma->vm_end, gap_addr); 3133 if (next && vma_is_accessible(next)) { 3134 if (!vma_test(next, VMA_GROWSUP_BIT)) 3135 return -ENOMEM; 3136 /* Check that both stack segments have the same anon_vma? */ 3137 } 3138 3139 if (next) 3140 vma_iter_prev_range_limit(&vmi, address); 3141 3142 vma_iter_config(&vmi, vma->vm_start, address); 3143 if (vma_iter_prealloc(&vmi, vma)) 3144 return -ENOMEM; 3145 3146 /* We must make sure the anon_vma is allocated. */ 3147 if (unlikely(anon_vma_prepare(vma))) { 3148 vma_iter_free(&vmi); 3149 return -ENOMEM; 3150 } 3151 3152 /* Lock the VMA before expanding to prevent concurrent page faults */ 3153 vma_start_write(vma); 3154 /* We update the anon VMA tree. */ 3155 anon_vma_lock_write(vma->anon_vma); 3156 3157 /* Somebody else might have raced and expanded it already */ 3158 if (address > vma->vm_end) { 3159 unsigned long size, grow; 3160 3161 size = address - vma->vm_start; 3162 grow = (address - vma->vm_end) >> PAGE_SHIFT; 3163 3164 error = -ENOMEM; 3165 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { 3166 error = acct_stack_growth(vma, size, grow); 3167 if (!error) { 3168 if (vma_test(vma, VMA_LOCKED_BIT)) 3169 mm->locked_vm += grow; 3170 vm_stat_account(mm, vma->vm_flags, grow); 3171 anon_vma_interval_tree_pre_update_vma(vma); 3172 vma->vm_end = address; 3173 /* Overwrite old entry in mtree. */ 3174 vma_iter_store_overwrite(&vmi, vma); 3175 anon_vma_interval_tree_post_update_vma(vma); 3176 3177 perf_event_mmap(vma); 3178 } 3179 } 3180 } 3181 anon_vma_unlock_write(vma->anon_vma); 3182 vma_iter_free(&vmi); 3183 validate_mm(mm); 3184 return error; 3185 } 3186 #endif /* CONFIG_STACK_GROWSUP */ 3187 3188 /* 3189 * vma is the first one with address < vma->vm_start. Have to extend vma. 3190 * mmap_lock held for writing. 3191 */ 3192 int expand_downwards(struct vm_area_struct *vma, unsigned long address) 3193 { 3194 struct mm_struct *mm = vma->vm_mm; 3195 struct vm_area_struct *prev; 3196 int error = 0; 3197 VMA_ITERATOR(vmi, mm, vma->vm_start); 3198 3199 if (!vma_test(vma, VMA_GROWSDOWN_BIT)) 3200 return -EFAULT; 3201 3202 mmap_assert_write_locked(mm); 3203 3204 address &= PAGE_MASK; 3205 if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) 3206 return -EPERM; 3207 3208 /* Enforce stack_guard_gap */ 3209 prev = vma_prev(&vmi); 3210 /* Check that both stack segments have the same anon_vma? */ 3211 if (prev) { 3212 if (!vma_test(prev, VMA_GROWSDOWN_BIT) && 3213 vma_is_accessible(prev) && 3214 (address - prev->vm_end < stack_guard_gap)) 3215 return -ENOMEM; 3216 } 3217 3218 if (prev) 3219 vma_iter_next_range_limit(&vmi, vma->vm_start); 3220 3221 vma_iter_config(&vmi, address, vma->vm_end); 3222 if (vma_iter_prealloc(&vmi, vma)) 3223 return -ENOMEM; 3224 3225 /* We must make sure the anon_vma is allocated. */ 3226 if (unlikely(anon_vma_prepare(vma))) { 3227 vma_iter_free(&vmi); 3228 return -ENOMEM; 3229 } 3230 3231 /* Lock the VMA before expanding to prevent concurrent page faults */ 3232 vma_start_write(vma); 3233 /* We update the anon VMA tree. 
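 * Holding the anon_vma lock below keeps the anon_vma interval tree stable
 * while vm_start and vm_pgoff are rewritten.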
*/ 3234 anon_vma_lock_write(vma->anon_vma); 3235 3236 /* Somebody else might have raced and expanded it already */ 3237 if (address < vma->vm_start) { 3238 unsigned long size, grow; 3239 3240 size = vma->vm_end - address; 3241 grow = (vma->vm_start - address) >> PAGE_SHIFT; 3242 3243 error = -ENOMEM; 3244 if (grow <= vma->vm_pgoff) { 3245 error = acct_stack_growth(vma, size, grow); 3246 if (!error) { 3247 if (vma_test(vma, VMA_LOCKED_BIT)) 3248 mm->locked_vm += grow; 3249 vm_stat_account(mm, vma->vm_flags, grow); 3250 anon_vma_interval_tree_pre_update_vma(vma); 3251 vma->vm_start = address; 3252 vma->vm_pgoff -= grow; 3253 /* Overwrite old entry in mtree. */ 3254 vma_iter_store_overwrite(&vmi, vma); 3255 anon_vma_interval_tree_post_update_vma(vma); 3256 3257 perf_event_mmap(vma); 3258 } 3259 } 3260 } 3261 anon_vma_unlock_write(vma->anon_vma); 3262 vma_iter_free(&vmi); 3263 validate_mm(mm); 3264 return error; 3265 } 3266 3267 int __vm_munmap(unsigned long start, size_t len, bool unlock) 3268 { 3269 int ret; 3270 struct mm_struct *mm = current->mm; 3271 LIST_HEAD(uf); 3272 VMA_ITERATOR(vmi, mm, start); 3273 3274 if (mmap_write_lock_killable(mm)) 3275 return -EINTR; 3276 3277 ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock); 3278 if (ret || !unlock) 3279 mmap_write_unlock(mm); 3280 3281 userfaultfd_unmap_complete(mm, &uf); 3282 return ret; 3283 } 3284 3285 /* Insert vm structure into process list sorted by address 3286 * and into the inode's i_mmap tree. If vm_file is non-NULL 3287 * then i_mmap_rwsem is taken here. 3288 */ 3289 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) 3290 { 3291 unsigned long charged = vma_pages(vma); 3292 3293 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) 3294 return -ENOMEM; 3295 3296 if (vma_test(vma, VMA_ACCOUNT_BIT) && 3297 security_vm_enough_memory_mm(mm, charged)) 3298 return -ENOMEM; 3299 3300 /* 3301 * The vm_pgoff of a purely anonymous vma should be irrelevant 3302 * until its first write fault, when page's anon_vma and index 3303 * are set. But now set the vm_pgoff it will almost certainly 3304 * end up with (unless mremap moves it elsewhere before that 3305 * first wfault), so /proc/pid/maps tells a consistent story. 3306 * 3307 * By setting it to reflect the virtual start address of the 3308 * vma, merges and splits can happen in a seamless way, just 3309 * using the existing file pgoff checks and manipulations. 3310 * Similarly in do_mmap and in do_brk_flags. 3311 */ 3312 if (vma_is_anonymous(vma)) { 3313 BUG_ON(vma->anon_vma); 3314 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; 3315 } 3316 3317 if (vma_link(mm, vma)) { 3318 if (vma_test(vma, VMA_ACCOUNT_BIT)) 3319 vm_unacct_memory(charged); 3320 return -ENOMEM; 3321 } 3322 3323 return 0; 3324 } 3325 3326 /** 3327 * vma_mmu_pagesize - Default MMU page size granularity for this VMA. 3328 * @vma: The user mapping. 3329 * 3330 * In the common case, the default page size used by the MMU matches the 3331 * default page size used by the kernel (see vma_kernel_pagesize()). On 3332 * architectures where it differs, an architecture-specific 'strong' version 3333 * of this symbol is required. 3334 * 3335 * The default MMU page size is not affected by Transparent Huge Pages 3336 * being in effect, or any usage of larger MMU page sizes (either through 3337 * architectural huge-page mappings or other explicit/implicit coalescing of 3338 * virtual ranges performed by the MMU). 3339 * 3340 * Return: The default MMU page size granularity for this VMA. 
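 *
 * (An architecture that maps some VMAs with larger base pages, e.g. hugetlb
 * mappings, would supply such a strong definition; the generic version below
 * simply defers to vma_kernel_pagesize().)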
3341 */ 3342 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) 3343 { 3344 return vma_kernel_pagesize(vma); 3345 } 3346