// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * VMA-specific functions.
 */

#include "vma_internal.h"
#include "vma.h"

static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
{
	struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;
	/*
	 * If the vma has a ->close operation then the driver probably needs to
	 * release per-vma resources, so we don't attempt to merge those if the
	 * caller indicates the current vma may be removed as part of the merge,
	 * which is the case if we are attempting to merge the next VMA into
	 * this one.
	 */
	bool may_remove_vma = merge_next;

	if (!mpol_equal(vmg->policy, vma_policy(vma)))
		return false;
	/*
	 * VM_SOFTDIRTY should not prevent VMA merging if we match the flags in
	 * everything but the dirty bit -- the caller should mark the merged
	 * VMA as dirty. If the dirty bit were not excluded from the
	 * comparison, we would increase pressure on the memory system by
	 * forcing the kernel to generate new VMAs where old ones could be
	 * extended instead.
	 */
	if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY)
		return false;
	if (vma->vm_file != vmg->file)
		return false;
	if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
		return false;
	if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
		return false;
	if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
		return false;
	return true;
}

static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
		struct anon_vma *anon_vma2, struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMAs cloned from
	 * parents; this avoids scalability problems caused by contention on
	 * the anon_vma lock.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
	    list_is_singular(&vma->anon_vma_chain)))
		return true;
	return anon_vma1 == anon_vma2;
}

/* Are the anon_vma's belonging to each VMA compatible with one another? */
static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1,
					    struct vm_area_struct *vma2)
{
	return is_mergeable_anon_vma(vma1->anon_vma, vma2->anon_vma, NULL);
}
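
/*
 * For illustration (hypothetical flag values, not taken from this file): the
 * XOR check above masks out VM_SOFTDIRTY only, so a VMA with
 * vm_flags == (VM_READ | VM_WRITE | VM_SOFTDIRTY) may still merge with a
 * proposed region whose flags are (VM_READ | VM_WRITE), because
 * (flags1 ^ flags2) & ~VM_SOFTDIRTY == 0. Any other differing bit, e.g.
 * VM_EXEC, survives the mask and the merge is refused.
 */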

/*
 * init_multi_vma_prep() - Initializer for struct vma_prepare
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 * @next: The next vma if it is to be adjusted
 * @remove: The first vma to be removed
 * @remove2: The second vma to be removed
 */
static void init_multi_vma_prep(struct vma_prepare *vp,
				struct vm_area_struct *vma,
				struct vm_area_struct *next,
				struct vm_area_struct *remove,
				struct vm_area_struct *remove2)
{
	memset(vp, 0, sizeof(struct vma_prepare));
	vp->vma = vma;
	vp->anon_vma = vma->anon_vma;
	vp->remove = remove;
	vp->remove2 = remove2;
	vp->adj_next = next;
	if (!vp->anon_vma && next)
		vp->anon_vma = next->anon_vma;

	vp->file = vma->vm_file;
	if (vp->file)
		vp->mapping = vma->vm_file->f_mapping;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if the same anon_vma is assigned but the offsets are
 * incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap() does not permit mmaps which wrap,
 * nor mmaps which cover the final page at index -1UL.
 *
 * We assume the vma may be removed as part of the merge.
 */
static bool can_vma_merge_before(struct vma_merge_struct *vmg)
{
	pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);

	if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
	    is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) {
		if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
			return true;
	}

	return false;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if the same anon_vma is assigned but the offsets are
 * incompatible.
 *
 * We assume that vma is not removed as part of the merge.
 */
static bool can_vma_merge_after(struct vma_merge_struct *vmg)
{
	if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
	    is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) {
		if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
			return true;
	}
	return false;
}
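
/*
 * Worked example of the offset checks above (hypothetical values): with
 * PAGE_SHIFT == 12, a proposed range [0x2000, 0x5000) has
 * pglen == PHYS_PFN(0x3000) == 3 pages. If it maps a file at vmg->pgoff == 10,
 * can_vma_merge_before() requires next->vm_pgoff == 10 + 3 == 13, i.e. next
 * must continue the file exactly where the new range leaves off. Similarly,
 * can_vma_merge_after() requires prev->vm_pgoff + vma_pages(prev) == 10, so
 * prev must end in the file exactly where the new range begins.
 */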

static void __vma_link_file(struct vm_area_struct *vma,
			    struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_allow_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_insert(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
				      struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * vma_prepare() - Helper function for handling locking VMAs prior to altering
 * @vp: The initialized vma_prepare struct
 */
static void vma_prepare(struct vma_prepare *vp)
{
	if (vp->file) {
		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);

		if (vp->adj_next)
			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
				      vp->adj_next->vm_end);

		i_mmap_lock_write(vp->mapping);
		if (vp->insert && vp->insert->vm_file) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(vp->insert,
					vp->insert->vm_file->f_mapping);
		}
	}

	if (vp->anon_vma) {
		anon_vma_lock_write(vp->anon_vma);
		anon_vma_interval_tree_pre_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
	}

	if (vp->file) {
		flush_dcache_mmap_lock(vp->mapping);
		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
		if (vp->adj_next)
			vma_interval_tree_remove(vp->adj_next,
						 &vp->mapping->i_mmap);
	}
}

/*
 * vma_complete() - Helper function for handling the unlocking after altering
 * VMAs, or for inserting a VMA.
 * @vp: The vma_prepare struct
 * @vmi: The vma iterator
 * @mm: The mm_struct
 */
static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
			 struct mm_struct *mm)
{
	if (vp->file) {
		if (vp->adj_next)
			vma_interval_tree_insert(vp->adj_next,
						 &vp->mapping->i_mmap);
		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
		flush_dcache_mmap_unlock(vp->mapping);
	}

	if (vp->remove && vp->file) {
		__remove_shared_vm_struct(vp->remove, vp->mapping);
		if (vp->remove2)
			__remove_shared_vm_struct(vp->remove2, vp->mapping);
	} else if (vp->insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		vma_iter_store(vmi, vp->insert);
		mm->map_count++;
	}

	if (vp->anon_vma) {
		anon_vma_interval_tree_post_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_post_update_vma(vp->adj_next);
		anon_vma_unlock_write(vp->anon_vma);
	}

	if (vp->file) {
		i_mmap_unlock_write(vp->mapping);
		uprobe_mmap(vp->vma);

		if (vp->adj_next)
			uprobe_mmap(vp->adj_next);
	}

	if (vp->remove) {
again:
		vma_mark_detached(vp->remove, true);
		if (vp->file) {
			uprobe_munmap(vp->remove, vp->remove->vm_start,
				      vp->remove->vm_end);
			fput(vp->file);
		}
		if (vp->remove->anon_vma)
			anon_vma_merge(vp->vma, vp->remove);
		mm->map_count--;
		mpol_put(vma_policy(vp->remove));
		if (!vp->remove2)
			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
		vm_area_free(vp->remove);

		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we are removing both mid and next vmas.
		 */
		if (vp->remove2) {
			vp->remove = vp->remove2;
			vp->remove2 = NULL;
			goto again;
		}
	}
	if (vp->insert && vp->file)
		uprobe_mmap(vp->insert);
}
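
/*
 * A minimal sketch of how the vma_prepare()/vma_complete() pair above is
 * intended to bracket a modification, mirroring the callers later in this
 * file (commit_merge(), __split_vma(), vma_shrink()); iterator preallocation
 * and huge-page fixups are elided and the fields shown are placeholders:
 *
 *	struct vma_prepare vp;
 *
 *	vma_start_write(vma);			// mark the VMA written to
 *	init_vma_prep(&vp, vma);		// or init_multi_vma_prep()
 *	vma_prepare(&vp);			// take rmap/file locks
 *	vma_set_range(vma, start, end, pgoff);	// the actual change
 *	vma_complete(&vp, vmi, vma->vm_mm);	// reinsert, drop locks
 */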

/*
 * init_vma_prep() - Initializer wrapper for vma_prepare struct
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 */
static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
{
	init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
}

/*
 * Can the proposed VMA be merged with the left (previous) VMA, taking into
 * account the start position of the proposed range?
 */
static bool can_vma_merge_left(struct vma_merge_struct *vmg)
{
	return vmg->prev && vmg->prev->vm_end == vmg->start &&
	       can_vma_merge_after(vmg);
}

/*
 * Can the proposed VMA be merged with the right (next) VMA, taking into
 * account the end position of the proposed range?
 *
 * In addition, if we can merge with the left VMA, ensure that the left and
 * right anon_vma's are also compatible.
 */
static bool can_vma_merge_right(struct vma_merge_struct *vmg,
				bool can_merge_left)
{
	if (!vmg->next || vmg->end != vmg->next->vm_start ||
	    !can_vma_merge_before(vmg))
		return false;

	if (!can_merge_left)
		return true;

	/*
	 * If we can merge with prev (left) and next (right), indicating that
	 * each VMA's anon_vma is compatible with the proposed anon_vma, this
	 * does not mean prev and next are compatible with EACH OTHER.
	 *
	 * We therefore check this in addition to mergeability to either side.
	 */
	return are_anon_vmas_compatible(vmg->prev, vmg->next);
}

/*
 * Close a vm structure and free it.
 */
void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed)
{
	might_sleep();
	if (!closed && vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	if (unreachable)
		__vm_area_free(vma);
	else
		vm_area_free(vma);
}

/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		  struct vm_area_struct *prev, struct vm_area_struct *next)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
		   /* mm_wr_locked = */ true);
	mas_set(mas, vma->vm_end);
	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
		      next ? next->vm_start : USER_PGTABLES_CEILING,
		      /* mm_wr_locked = */ true);
	tlb_finish_mmu(&tlb);
}

/*
 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
 * has already been checked or where it doesn't make sense to fail.
 * The VMA Iterator will point to the original VMA.
 */
static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
		       unsigned long addr, int new_below)
{
	struct vma_prepare vp;
	struct vm_area_struct *new;
	int err;

	WARN_ON(vma->vm_start >= addr);
	WARN_ON(vma->vm_end <= addr);

	if (vma->vm_ops && vma->vm_ops->may_split) {
		err = vma->vm_ops->may_split(vma, addr);
		if (err)
			return err;
	}

	new = vm_area_dup(vma);
	if (!new)
		return -ENOMEM;

	if (new_below) {
		new->vm_end = addr;
	} else {
		new->vm_start = addr;
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	err = -ENOMEM;
	vma_iter_config(vmi, new->vm_start, new->vm_end);
	if (vma_iter_prealloc(vmi, new))
		goto out_free_vma;

	err = vma_dup_policy(vma, new);
	if (err)
		goto out_free_vmi;

	err = anon_vma_clone(new, vma);
	if (err)
		goto out_free_mpol;

	if (new->vm_file)
		get_file(new->vm_file);

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	vma_start_write(vma);
	vma_start_write(new);

	init_vma_prep(&vp, vma);
	vp.insert = new;
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);

	if (new_below) {
		vma->vm_start = addr;
		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
	} else {
		vma->vm_end = addr;
	}

	/* vma_complete stores the new vma */
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);

	/* Success. */
	if (new_below)
		vma_next(vmi);
	else
		vma_prev(vmi);

	return 0;

out_free_mpol:
	mpol_put(vma_policy(new));
out_free_vmi:
	vma_iter_free(vmi);
out_free_vma:
	vm_area_free(new);
	return err;
}
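
/*
 * Worked example of the split above (hypothetical values, PAGE_SHIFT == 12):
 * splitting vma [0x1000, 0x5000) with vm_pgoff == 4 at addr == 0x3000.
 *
 * new_below == 1: new covers [0x1000, 0x3000) keeping vm_pgoff == 4, while
 * the original vma becomes [0x3000, 0x5000) and its vm_pgoff is advanced by
 * (0x3000 - 0x1000) >> PAGE_SHIFT == 2 pages, to 6.
 *
 * new_below == 0: new covers [0x3000, 0x5000) with vm_pgoff == 4 + 2 == 6,
 * while the original vma is truncated to [0x1000, 0x3000).
 */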

/*
 * Split a vma into two pieces at address 'addr'; a new vma is allocated
 * either for the first part or the tail.
 */
static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
		     unsigned long addr, int new_below)
{
	if (vma->vm_mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	return __split_vma(vmi, vma, addr, new_below);
}

/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_lock and by
 * the root anon_vma's mutex.
 */
void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}

/*
 * dup_anon_vma() - Helper function to duplicate anon_vma
 * @dst: The destination VMA
 * @src: The source VMA
 * @dup: Pointer to the destination VMA when successful.
 *
 * Returns: 0 on success.
 */
static int dup_anon_vma(struct vm_area_struct *dst,
			struct vm_area_struct *src, struct vm_area_struct **dup)
{
	/*
	 * Easily overlooked: when mprotect shifts the boundary, make sure the
	 * expanding vma has anon_vma set if the shrinking vma had, to cover
	 * any anon pages imported.
	 */
	if (src->anon_vma && !dst->anon_vma) {
		int ret;

		vma_assert_write_locked(dst);
		dst->anon_vma = src->anon_vma;
		ret = anon_vma_clone(dst, src);
		if (ret)
			return ret;

		*dup = dst;
	}

	return 0;
}

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mt_validate(&mm->mm_mt);
	for_each_vma(vmi, vma) {
#ifdef CONFIG_DEBUG_VM_RB
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;
#endif
		unsigned long vmi_start, vmi_end;
		bool warn = 0;

		vmi_start = vma_iter_addr(&vmi);
		vmi_end = vma_iter_end(&vmi);
		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
			warn = 1;

		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
			warn = 1;

		if (warn) {
			pr_emerg("issue in %s\n", current->comm);
			dump_stack();
			dump_vma(vma);
			pr_emerg("tree range: %px start %lx end %lx\n", vma,
				 vmi_start, vmi_end - 1);
			vma_iter_dump_tree(&vmi);
		}

#ifdef CONFIG_DEBUG_VM_RB
		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}
#endif
		i++;
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */

/* Actually perform the VMA merge operation. */
static int commit_merge(struct vma_merge_struct *vmg,
			struct vm_area_struct *remove)
{
	struct vma_prepare vp;

	init_multi_vma_prep(&vp, vmg->vma, NULL, remove, NULL);

	/* Note: vma iterator must be pointing to 'start'. */
	vma_iter_config(vmg->vmi, vmg->start, vmg->end);

	if (vma_iter_prealloc(vmg->vmi, vmg->vma))
		return -ENOMEM;

	vma_prepare(&vp);
	vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, 0);
	vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);

	vma_iter_store(vmg->vmi, vmg->vma);

	vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm);

	return 0;
}

/*
 * vma_merge_new_range - Attempt to merge a new VMA into address space
 *
 * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
 *       (exclusive), which we try to merge with any adjacent VMAs if possible.
 *
 * We are about to add a VMA to the address space starting at @vmg->start and
 * ending at @vmg->end. There are three different possible scenarios:
 *
 * 1. There is a VMA with identical properties immediately adjacent to the
 *    proposed new VMA [@vmg->start, @vmg->end) either before or after it -
 *    EXPAND that VMA:
 *
 * Proposed:       |-----|  or  |-----|
 * Existing:  |----|                  |----|
 *
 * 2. There are VMAs with identical properties immediately adjacent to the
 *    proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
 *    EXPAND the former and REMOVE the latter:
 *
 * Proposed:       |-----|
 * Existing:  |----|     |----|
 *
 * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
 *    VMAs do not have identical attributes - NO MERGE POSSIBLE.
 *
 * In instances where we can merge, this function returns the expanded VMA which
 * will have its range adjusted accordingly and the underlying maple tree also
 * adjusted.
 *
 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
 *          to the VMA we expanded.
 *
 * This function adjusts @vmg to provide @vmg->next if not already specified,
 * and adjusts [@vmg->start, @vmg->end) to span the expanded range.
 *
 * ASSUMPTIONS:
 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
 * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
 *   other than VMAs that will be unmapped should the operation succeed.
 * - The caller must have specified the previous vma in @vmg->prev.
 * - The caller must have specified the next vma in @vmg->next.
 * - The caller must have positioned the vmi at or before the gap.
 */
struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *prev = vmg->prev;
	struct vm_area_struct *next = vmg->next;
	unsigned long start = vmg->start;
	unsigned long end = vmg->end;
	pgoff_t pgoff = vmg->pgoff;
	pgoff_t pglen = PHYS_PFN(end - start);
	bool can_merge_left, can_merge_right;

	mmap_assert_write_locked(vmg->mm);
	VM_WARN_ON(vmg->vma);
	/* vmi must point at or before the gap. */
	VM_WARN_ON(vma_iter_addr(vmg->vmi) > end);

	vmg->state = VMA_MERGE_NOMERGE;

	/* Special VMAs are unmergeable, also if no prev/next. */
	if ((vmg->flags & VM_SPECIAL) || (!prev && !next))
		return NULL;

	can_merge_left = can_vma_merge_left(vmg);
	can_merge_right = can_vma_merge_right(vmg, can_merge_left);

	/* If we can merge with the next VMA, adjust vmg accordingly. */
	if (can_merge_right) {
		vmg->end = next->vm_end;
		vmg->vma = next;
		vmg->pgoff = next->vm_pgoff - pglen;
	}

	/* If we can merge with the previous VMA, adjust vmg accordingly. */
	if (can_merge_left) {
		vmg->start = prev->vm_start;
		vmg->vma = prev;
		vmg->pgoff = prev->vm_pgoff;

		vma_prev(vmg->vmi); /* Equivalent to going to the previous range */
	}

	/*
	 * Now try to expand adjacent VMA(s). This takes care of removing the
	 * following VMA if we have VMAs on both sides.
	 */
	if (vmg->vma && !vma_expand(vmg)) {
		khugepaged_enter_vma(vmg->vma, vmg->flags);
		vmg->state = VMA_MERGE_SUCCESS;
		return vmg->vma;
	}

	/*
	 * If expansion failed, reset state. This allows us to retry the merge
	 * later. Note that we must check whether we moved the iterator back
	 * (i.e. merged left) before clearing vmg->vma, as the comparison is
	 * meaningless afterwards.
	 */
	if (vmg->vma == prev)
		vma_iter_set(vmg->vmi, start);
	vmg->vma = NULL;
	vmg->start = start;
	vmg->end = end;
	vmg->pgoff = pgoff;

	return NULL;
}
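
/*
 * Worked example of scenario 2 above (hypothetical values): prev spans
 * [0x1000, 0x3000), the proposed range is [0x3000, 0x4000) and next spans
 * [0x4000, 0x6000), all with identical properties. The merge-right adjustment
 * first grows the proposed range to [0x3000, 0x6000) targeting next, then the
 * merge-left adjustment retargets vmg->vma to prev and the range becomes
 * [0x1000, 0x6000). vma_expand() then stretches prev over the whole range and
 * removes next, leaving a single VMA [0x1000, 0x6000).
 */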

/*
 * vma_expand - Expand an existing VMA
 *
 * @vmg: Describes a VMA expansion operation.
 *
 * Expand @vma to vmg->start and vmg->end. Can expand off the start and end.
 * Will expand over vmg->next if it's different from vmg->vma and vmg->end ==
 * vmg->next->vm_end. Checking if the vmg->vma can expand and merge with
 * vmg->next needs to be handled by the caller.
 *
 * Returns: 0 on success.
 *
 * ASSUMPTIONS:
 * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock.
 * - The caller must have set @vmg->vma and @vmg->next.
 */
int vma_expand(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *anon_dup = NULL;
	bool remove_next = false;
	struct vm_area_struct *vma = vmg->vma;
	struct vm_area_struct *next = vmg->next;

	mmap_assert_write_locked(vmg->mm);

	vma_start_write(vma);
	if (next && (vma != next) && (vmg->end == next->vm_end)) {
		int ret;

		remove_next = true;
		vma_start_write(next);
		ret = dup_anon_vma(vma, next, &anon_dup);
		if (ret)
			return ret;
	}

	/* Not merging but overwriting any part of next is not handled. */
	VM_WARN_ON(next && !remove_next &&
		   next != vma && vmg->end > next->vm_start);
	/* Only handles expanding */
	VM_WARN_ON(vma->vm_start < vmg->start || vma->vm_end > vmg->end);

	if (commit_merge(vmg, remove_next ? next : NULL))
		goto nomem;

	return 0;

nomem:
	vmg->state = VMA_MERGE_ERROR_NOMEM;
	if (anon_dup)
		unlink_anon_vmas(anon_dup);
	return -ENOMEM;
}

/*
 * vma_shrink() - Reduce an existing VMA's memory area
 * @vmi: The vma iterator
 * @vma: The VMA to modify
 * @start: The new start
 * @end: The new end
 * @pgoff: The new page offset
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff)
{
	struct vma_prepare vp;

	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));

	if (vma->vm_start < start)
		vma_iter_config(vmi, vma->vm_start, start);
	else
		vma_iter_config(vmi, end, vma->vm_end);

	if (vma_iter_prealloc(vmi, NULL))
		return -ENOMEM;

	vma_start_write(vma);

	init_vma_prep(&vp, vma);
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, start, end, 0);

	vma_iter_clear(vmi);
	vma_set_range(vma, start, end, pgoff);
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);
	return 0;
}

static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
				  struct ma_state *mas_detach, bool mm_wr_locked)
{
	struct mmu_gather tlb;

	if (!vms->clear_ptes) /* Nothing to do */
		return;

	/*
	 * We can free page tables without write-locking mmap_lock because VMAs
	 * were isolated before we downgraded mmap_lock.
	 */
	mas_set(mas_detach, 1);
	lru_add_drain();
	tlb_gather_mmu(&tlb, vms->vma->vm_mm);
	update_hiwater_rss(vms->vma->vm_mm);
	unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
		   vms->vma_count, mm_wr_locked);

	mas_set(mas_detach, 1);
	/* start and end may be different if there is no prev or next vma. */
	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
		      vms->unmap_end, mm_wr_locked);
	tlb_finish_mmu(&tlb);
	vms->clear_ptes = false;
}

void vms_clean_up_area(struct vma_munmap_struct *vms,
		       struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	if (!vms->nr_pages)
		return;

	vms_clear_ptes(vms, mas_detach, true);
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		if (vma->vm_ops && vma->vm_ops->close)
			vma->vm_ops->close(vma);
	vms->closed_vm_ops = true;
}

/*
 * vms_complete_munmap_vmas() - Finish the munmap() operation
 * @vms: The vma munmap struct
 * @mas_detach: The maple state of the detached vmas
 *
 * This updates the mm_struct, unmaps the region, frees the resources used for
 * the munmap() and may downgrade the lock, if requested. It does everything
 * that needs to be done once the vma maple tree has been updated.
 */
void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
			      struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	mm = current->mm;
	mm->map_count -= vms->vma_count;
	mm->locked_vm -= vms->locked_vm;
	if (vms->unlock)
		mmap_write_downgrade(mm);

	if (!vms->nr_pages)
		return;

	vms_clear_ptes(vms, mas_detach, !vms->unlock);
	/* Update high watermark before we lower total_vm */
	update_hiwater_vm(mm);
	/* Stat accounting */
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
	/* Paranoid bookkeeping */
	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
	VM_WARN_ON(vms->data_vm > mm->data_vm);
	mm->exec_vm -= vms->exec_vm;
	mm->stack_vm -= vms->stack_vm;
	mm->data_vm -= vms->data_vm;

	/* Remove and clean up vmas */
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		remove_vma(vma, /* unreachable = */ false, vms->closed_vm_ops);

	vm_unacct_memory(vms->nr_accounted);
	validate_mm(mm);
	if (vms->unlock)
		mmap_read_unlock(mm);

	__mt_destroy(mas_detach->tree);
}

/*
 * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
 * for removal at a later date. Handles splitting the first and last VMAs if
 * necessary and marking the vmas as isolated.
 *
 * @vms: The vma munmap struct
 * @mas_detach: The maple state tracking the detached tree
 *
 * Return: 0 on success, -EPERM on mseal vmas, -ENOMEM otherwise
 */
int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
			   struct ma_state *mas_detach)
{
	struct vm_area_struct *next = NULL;
	int error = -ENOMEM;

	/*
	 * If we need to split any vma, do it now to save pain later.
	 * Does it split the first one?
	 */
	if (vms->start > vms->vma->vm_start) {

		/*
		 * Make sure that map_count on return from munmap() will
		 * not exceed its limit; but let map_count go just above
		 * its limit temporarily, to help free resources as expected.
		 */
		if (vms->end < vms->vma->vm_end &&
		    vms->vma->vm_mm->map_count >= sysctl_max_map_count)
			goto map_count_exceeded;

		/* Don't bother splitting the VMA if we can't unmap it anyway */
		if (!can_modify_vma(vms->vma)) {
			error = -EPERM;
			goto start_split_failed;
		}

		if (__split_vma(vms->vmi, vms->vma, vms->start, 1))
			goto start_split_failed;
	}
	vms->prev = vma_prev(vms->vmi);
	if (vms->prev)
		vms->unmap_start = vms->prev->vm_end;

	/*
	 * Detach a range of VMAs from the mm. Using next as a temp variable as
	 * it is always overwritten.
	 */
	for_each_vma_range(*(vms->vmi), next, vms->end) {
		long nrpages;

		if (!can_modify_vma(next)) {
			error = -EPERM;
			goto modify_vma_failed;
		}
		/* Does it split the end? */
		if (next->vm_end > vms->end) {
			if (__split_vma(vms->vmi, next, vms->end, 0))
				goto end_split_failed;
		}
		vma_start_write(next);
		mas_set(mas_detach, vms->vma_count++);
		if (mas_store_gfp(mas_detach, next, GFP_KERNEL))
			goto munmap_gather_failed;

		vma_mark_detached(next, true);
		nrpages = vma_pages(next);

		vms->nr_pages += nrpages;
		if (next->vm_flags & VM_LOCKED)
			vms->locked_vm += nrpages;

		if (next->vm_flags & VM_ACCOUNT)
			vms->nr_accounted += nrpages;

		if (is_exec_mapping(next->vm_flags))
			vms->exec_vm += nrpages;
		else if (is_stack_mapping(next->vm_flags))
			vms->stack_vm += nrpages;
		else if (is_data_mapping(next->vm_flags))
			vms->data_vm += nrpages;

		if (unlikely(vms->uf)) {
			/*
			 * If userfaultfd_unmap_prep returns an error the vmas
			 * will remain split, but userland will get a
			 * highly unexpected error anyway. This is no
			 * different than the case where the first of the two
			 * __split_vma fails, but we don't undo the first
			 * split, though we could. This failure is unlikely
			 * enough that it's not worth optimizing for.
			 */
			if (userfaultfd_unmap_prep(next, vms->start, vms->end,
						   vms->uf))
				goto userfaultfd_error;
		}
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
		BUG_ON(next->vm_start < vms->start);
		BUG_ON(next->vm_start > vms->end);
#endif
	}

	vms->next = vma_next(vms->vmi);
	if (vms->next)
		vms->unmap_end = vms->next->vm_start;

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	/* Make sure no VMAs are about to be lost. */
	{
		MA_STATE(test, mas_detach->tree, 0, 0);
		struct vm_area_struct *vma_mas, *vma_test;
		int test_count = 0;

		vma_iter_set(vms->vmi, vms->start);
		rcu_read_lock();
		vma_test = mas_find(&test, vms->vma_count - 1);
		for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
			BUG_ON(vma_mas != vma_test);
			test_count++;
			vma_test = mas_next(&test, vms->vma_count - 1);
		}
		rcu_read_unlock();
		BUG_ON(vms->vma_count != test_count);
	}
#endif

	while (vma_iter_addr(vms->vmi) > vms->start)
		vma_iter_prev_range(vms->vmi);

	vms->clear_ptes = true;
	return 0;

userfaultfd_error:
munmap_gather_failed:
end_split_failed:
modify_vma_failed:
	reattach_vmas(mas_detach);
start_split_failed:
map_count_exceeded:
	return error;
}

/*
 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
 * @vmi: The vma iterator
 * @vma: The starting vm_area_struct
 * @mm: The mm_struct
 * @start: The aligned start address to munmap.
 * @end: The aligned end address to munmap.
 * @uf: The userfaultfd list_head
 * @unlock: Set to true to drop the mmap_lock. Unlocking only happens on
 *	    success.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
			struct mm_struct *mm, unsigned long start,
			unsigned long end, struct list_head *uf, bool unlock)
{
	struct maple_tree mt_detach;
	MA_STATE(mas_detach, &mt_detach, 0, 0);
	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
	mt_on_stack(mt_detach);
	struct vma_munmap_struct vms;
	int error;

	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
	error = vms_gather_munmap_vmas(&vms, &mas_detach);
	if (error)
		goto gather_failed;

	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
	if (error)
		goto clear_tree_failed;

	/* Point of no return */
	vms_complete_munmap_vmas(&vms, &mas_detach);
	return 0;

clear_tree_failed:
	reattach_vmas(&mas_detach);
gather_failed:
	validate_mm(mm);
	return error;
}

/*
 * do_vmi_munmap() - munmap a given range.
 * @vmi: The vma iterator
 * @mm: The mm_struct
 * @start: The start address to munmap
 * @len: The length of the range to munmap
 * @uf: The userfaultfd list_head
 * @unlock: Set to true if the user wants to drop the mmap_lock on success
 *
 * This function takes a @vmi that is either pointing to the previous VMA or
 * set to MA_START and sets it up to remove the mapping(s). The @len will be
 * aligned.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock)
{
	unsigned long end;
	struct vm_area_struct *vma;

	if (offset_in_page(start) || start > TASK_SIZE ||
	    len > TASK_SIZE - start)
		return -EINVAL;

	end = start + PAGE_ALIGN(len);
	if (end == start)
		return -EINVAL;

	/* Find the first overlapping VMA */
	vma = vma_find(vmi, end);
	if (!vma) {
		if (unlock)
			mmap_write_unlock(mm);
		return 0;
	}

	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
}
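
/*
 * Worked example of the argument checks above (hypothetical values, 4KiB
 * pages): munmap(0x10000, 1) passes offset_in_page(0x10000) == 0, and
 * PAGE_ALIGN(1) rounds the length up so end == 0x11000, i.e. the whole page
 * is unmapped. A misaligned start such as 0x10123 fails with -EINVAL, as does
 * len == 0, since then end == start.
 */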

/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
 * figure out whether that can be merged with its predecessor or its
 * successor. Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where **** is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
 * at the same address as **** and is of the same or larger span, and
 * NNNN the next vma after ****:
 *
 *         ****             ****                   ****
 *    PPPPPPNNNNNN    PPPPPPNNNNNN          PPPPPPCCCCCC
 *    cannot merge    might become          might become
 *                    PPNNNNNNNNNN          PPPPPPPPPPCC
 *    mmap, brk or    case 4 below          case 5 below
 *    mremap move:
 *                        ****                  ****
 *                    PPPP    NNNN          PPPPCCCCNNNN
 *                    might become          might become
 *                    PPPPPPPPPPPP 1 or     PPPPPPPPPPPP 6 or
 *                    PPPPPPPPNNNN 2 or     PPPPPPPPNNNN 7 or
 *                    PPPPNNNNNNNN 3        PPPPNNNNNNNN 8
 *
 * It is important for case 8 that the vma CCCC overlapping the
 * region **** is never going to be extended over NNNN. Instead NNNN must
 * be extended in region **** and CCCC must be removed. This way in
 * all cases where vma_merge succeeds, the moment vma_merge drops the
 * rmap_locks, the properties of the merged vma will be already
 * correct for the whole merged range. Some of those properties like
 * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
 * be correct for the whole merged range immediately after the
 * rmap_locks are released. Otherwise if NNNN would be removed and
 * CCCC would be extended over the NNNN range, remove_migration_ptes
 * or other rmap walkers (if working on addresses beyond the "end"
 * parameter) may establish ptes with the wrong permissions of CCCC
 * instead of the right permissions of NNNN.
 *
 * In the code below:
 * PPPP is represented by *prev
 * CCCC is represented by *curr or not represented at all (NULL)
 * NNNN is represented by *next or not represented at all (NULL)
 * **** is not represented - it will be merged and the vma containing the
 *      area is returned, or the function will return NULL
 */
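
/*
 * For illustration (hypothetical values): with prev == [0x1000, 0x2000) and
 * next == [0x3000, 0x4000), an mmap() of [0x2000, 0x3000) with matching
 * properties is case 1 above: the hole is filled and a single VMA
 * [0x1000, 0x4000) results. If only prev matched, case 2 would extend prev to
 * [0x1000, 0x3000); if only next matched, case 3 would extend next back to
 * [0x2000, 0x4000). The mprotect cases 5-8 differ in that the region is
 * already mapped by curr (CCCC), which must shrink or be removed.
 */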
static struct vm_area_struct *vma_merge(struct vma_merge_struct *vmg)
{
	struct mm_struct *mm = vmg->mm;
	struct vm_area_struct *prev = vmg->prev;
	struct vm_area_struct *curr, *next, *res;
	struct vm_area_struct *vma, *adjust, *remove, *remove2;
	struct vm_area_struct *anon_dup = NULL;
	struct vma_prepare vp;
	pgoff_t vma_pgoff;
	int err = 0;
	bool merge_prev = false;
	bool merge_next = false;
	bool vma_expanded = false;
	unsigned long addr = vmg->start;
	unsigned long end = vmg->end;
	unsigned long vma_start = addr;
	unsigned long vma_end = end;
	pgoff_t pglen = PHYS_PFN(end - addr);
	long adj_start = 0;

	vmg->state = VMA_MERGE_NOMERGE;

	/*
	 * We later require that vma->vm_flags == vm_flags,
	 * so this tests vma->vm_flags & VM_SPECIAL, too.
	 */
	if (vmg->flags & VM_SPECIAL)
		return NULL;

	/* Does the input range span an existing VMA? (cases 5 - 8) */
	curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);

	if (!curr ||			/* cases 1 - 4 */
	    end == curr->vm_end)	/* cases 6 - 8, adjacent VMA */
		next = vmg->next = vma_lookup(mm, end);
	else
		next = vmg->next = NULL;	/* case 5 */

	if (prev) {
		vma_start = prev->vm_start;
		vma_pgoff = prev->vm_pgoff;

		/* Can we merge the predecessor? */
		if (addr == prev->vm_end && can_vma_merge_after(vmg)) {
			merge_prev = true;
			vma_prev(vmg->vmi);
		}
	}

	/* Can we merge the successor? */
	if (next && can_vma_merge_before(vmg))
		merge_next = true;

	/* Verify some invariants that must be enforced by the caller. */
	VM_WARN_ON(prev && addr <= prev->vm_start);
	VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
	VM_WARN_ON(addr >= end);

	if (!merge_prev && !merge_next)
		return NULL;	/* Not mergeable. */

	if (merge_prev)
		vma_start_write(prev);

	res = vma = prev;
	remove = remove2 = adjust = NULL;

	/* Can we merge both the predecessor and the successor? */
	if (merge_prev && merge_next &&
	    is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
		vma_start_write(next);
		remove = next;				/* case 1 */
		vma_end = next->vm_end;
		err = dup_anon_vma(prev, next, &anon_dup);
		if (curr) {				/* case 6 */
			vma_start_write(curr);
			remove = curr;
			remove2 = next;
			/*
			 * Note that the dup_anon_vma below cannot overwrite err
			 * since the first caller would do nothing unless next
			 * has an anon_vma.
			 */
			if (!next->anon_vma)
				err = dup_anon_vma(prev, curr, &anon_dup);
		}
	} else if (merge_prev) {			/* case 2 */
		if (curr) {
			vma_start_write(curr);
			if (end == curr->vm_end) {	/* case 7 */
				/*
				 * can_vma_merge_after() assumed we would not
				 * be removing the prev vma, so it skipped the
				 * check for vm_ops->close, but we are removing
				 * curr.
				 */
				if (curr->vm_ops && curr->vm_ops->close)
					err = -EINVAL;
				remove = curr;
			} else {			/* case 5 */
				adjust = curr;
				adj_start = (end - curr->vm_start);
			}
			if (!err)
				err = dup_anon_vma(prev, curr, &anon_dup);
		}
	} else { /* merge_next */
		vma_start_write(next);
		res = next;
		if (prev && addr < prev->vm_end) {	/* case 4 */
			vma_start_write(prev);
			vma_end = addr;
			adjust = next;
			adj_start = -(prev->vm_end - addr);
			err = dup_anon_vma(next, prev, &anon_dup);
		} else {
			/*
			 * Note that cases 3 and 8 are the ONLY ones where prev
			 * is permitted to be (but is not necessarily) NULL.
			 */
			vma = next;			/* case 3 */
			vma_start = addr;
			vma_end = next->vm_end;
			vma_pgoff = next->vm_pgoff - pglen;
			if (curr) {			/* case 8 */
				vma_pgoff = curr->vm_pgoff;
				vma_start_write(curr);
				remove = curr;
				err = dup_anon_vma(next, curr, &anon_dup);
			}
		}
	}
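
	/*
	 * At this point adj_start encodes how the VMA held in 'adjust' must
	 * move (hypothetical values, case numbers as in the comment above):
	 * in case 5 it is positive, e.g. end - curr->vm_start == 0x1000 moves
	 * curr's start forward by a page as prev grows over it; in case 4 it
	 * is negative, e.g. -(prev->vm_end - addr) == -0x1000 moves next's
	 * start backward as prev is trimmed. adj_start == 0 means no
	 * neighbouring VMA needs adjusting.
	 */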

	/* Error in anon_vma clone. */
	if (err)
		goto anon_vma_fail;

	if (vma_start < vma->vm_start || vma_end > vma->vm_end)
		vma_expanded = true;

	if (vma_expanded) {
		vma_iter_config(vmg->vmi, vma_start, vma_end);
	} else {
		vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
				adjust->vm_end);
	}

	if (vma_iter_prealloc(vmg->vmi, vma))
		goto prealloc_fail;

	init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
		   vp.anon_vma != adjust->anon_vma);

	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
	vma_set_range(vma, vma_start, vma_end, vma_pgoff);

	if (vma_expanded)
		vma_iter_store(vmg->vmi, vma);

	if (adj_start) {
		adjust->vm_start += adj_start;
		adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
		if (adj_start < 0) {
			WARN_ON(vma_expanded);
			vma_iter_store(vmg->vmi, next);
		}
	}

	vma_complete(&vp, vmg->vmi, mm);
	validate_mm(mm);
	khugepaged_enter_vma(res, vmg->flags);

	vmg->state = VMA_MERGE_SUCCESS;
	return res;

prealloc_fail:
	vmg->state = VMA_MERGE_ERROR_NOMEM;
	if (anon_dup)
		unlink_anon_vmas(anon_dup);

anon_vma_fail:
	if (err == -ENOMEM)
		vmg->state = VMA_MERGE_ERROR_NOMEM;

	vma_iter_set(vmg->vmi, addr);
	vma_iter_load(vmg->vmi);
	return NULL;
}

/*
 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
 * context and anonymous VMA name within the range [start, end).
 *
 * As a result, we might be able to merge the newly modified VMA range with an
 * adjacent VMA with identical properties.
 *
 * If no merge is possible and the range does not span the entirety of the VMA,
 * we then need to split the VMA to accommodate the change.
 *
 * The function returns either the merged VMA, the original VMA if a split was
 * required instead, or an error if the split failed.
 */
static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *vma = vmg->vma;
	struct vm_area_struct *merged;

	/* First, try to merge. */
	merged = vma_merge(vmg);
	if (merged)
		return merged;

	/* Split any preceding portion of the VMA. */
	if (vma->vm_start < vmg->start) {
		int err = split_vma(vmg->vmi, vma, vmg->start, 1);

		if (err)
			return ERR_PTR(err);
	}

	/* Split any trailing portion of the VMA. */
	if (vma->vm_end > vmg->end) {
		int err = split_vma(vmg->vmi, vma, vmg->end, 0);

		if (err)
			return ERR_PTR(err);
	}

	return vma;
}

struct vm_area_struct *vma_modify_flags(
		struct vma_iterator *vmi, struct vm_area_struct *prev,
		struct vm_area_struct *vma, unsigned long start,
		unsigned long end, unsigned long new_flags)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;

	return vma_modify(&vmg);
}

struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;
	vmg.anon_name = new_name;

	return vma_modify(&vmg);
}

struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.policy = new_pol;

	return vma_modify(&vmg);
}

struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;
	vmg.uffd_ctx = new_ctx;

	return vma_modify(&vmg);
}

/*
 * Expand vma by delta bytes, potentially merging with an immediately adjacent
 * VMA with identical properties.
 */
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta)
{
	VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);

	vmg.next = vma_iter_next_rewind(vmi, NULL);
	vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */

	return vma_merge_new_range(&vmg);
}

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
{
	vb->count = 0;
}

static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
{
	struct address_space *mapping;
	int i;

	mapping = vb->vmas[0]->vm_file->f_mapping;
	i_mmap_lock_write(mapping);
	for (i = 0; i < vb->count; i++) {
		VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
		__remove_shared_vm_struct(vb->vmas[i], mapping);
	}
	i_mmap_unlock_write(mapping);

	unlink_file_vma_batch_init(vb);
}

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma)
{
	if (vma->vm_file == NULL)
		return;

	if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
	    vb->count == ARRAY_SIZE(vb->vmas))
		unlink_file_vma_batch_process(vb);

	vb->vmas[vb->count] = vma;
	vb->count++;
}

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
{
	if (vb->count > 0)
		unlink_file_vma_batch_process(vb);
}
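
/*
 * A minimal usage sketch of the batching API above, assuming the caller holds
 * whatever locks the individual helpers require (the VMA walk shown is a
 * placeholder):
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for_each_vma(vmi, vma)
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 *
 * Batching amortises the i_mmap_rwsem acquisition: the lock is taken once per
 * run of VMAs sharing a file rather than once per VMA.
 */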

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;

		i_mmap_lock_write(mapping);
		__remove_shared_vm_struct(vma, mapping);
		i_mmap_unlock_write(mapping);
	}
}

void vma_link_file(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping;

	if (file) {
		mapping = file->f_mapping;
		i_mmap_lock_write(mapping);
		__vma_link_file(vma, mapping);
		i_mmap_unlock_write(mapping);
	}
}

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
{
	VMA_ITERATOR(vmi, mm, 0);

	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;

	vma_start_write(vma);
	vma_iter_store(&vmi, vma);
	vma_link_file(vma);
	mm->map_count++;
	validate_mm(mm);
	return 0;
}

/*
 * Copy the vma structure to a new location in the same mm,
 * prior to moving page table entries, to effect an mremap move.
 */
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
				unsigned long addr, unsigned long len,
				pgoff_t pgoff, bool *need_rmap_locks)
{
	struct vm_area_struct *vma = *vmap;
	unsigned long vma_start = vma->vm_start;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	bool faulted_in_anon_vma = true;
	VMA_ITERATOR(vmi, mm, addr);
	VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);

	/*
	 * If the anonymous vma has not yet been faulted, update the new pgoff
	 * to match the new location, to increase its chance of merging.
	 */
	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
		pgoff = addr >> PAGE_SHIFT;
		faulted_in_anon_vma = false;
	}

	new_vma = find_vma_prev(mm, addr, &vmg.prev);
	if (new_vma && new_vma->vm_start < addr + len)
		return NULL;	/* should never get here */

	vmg.vma = NULL; /* New VMA range. */
	vmg.pgoff = pgoff;
	vmg.next = vma_iter_next_rewind(&vmi, NULL);
	new_vma = vma_merge_new_range(&vmg);

	if (new_vma) {
		/*
		 * The source vma may have been merged into new_vma.
		 */
		if (unlikely(vma_start >= new_vma->vm_start &&
			     vma_start < new_vma->vm_end)) {
			/*
			 * The only way we can get a vma_merge with
			 * self during an mremap is if the vma hasn't
			 * been faulted in yet and we were allowed to
			 * reset the dst vma->vm_pgoff to the
			 * destination address of the mremap to allow
			 * the merge to happen. mremap must change the
			 * vm_pgoff linearity between src and dst vmas
			 * (in turn preventing a vma_merge) to be
			 * safe. It is only safe to keep the vm_pgoff
			 * linear if there are no pages mapped yet.
			 */
			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
			*vmap = vma = new_vma;
		}
		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
	} else {
		new_vma = vm_area_dup(vma);
		if (!new_vma)
			goto out;
		vma_set_range(new_vma, addr, addr + len, pgoff);
		if (vma_dup_policy(vma, new_vma))
			goto out_free_vma;
		if (anon_vma_clone(new_vma, vma))
			goto out_free_mempol;
		if (new_vma->vm_file)
			get_file(new_vma->vm_file);
		if (new_vma->vm_ops && new_vma->vm_ops->open)
			new_vma->vm_ops->open(new_vma);
		if (vma_link(mm, new_vma))
			goto out_vma_link;
		*need_rmap_locks = false;
	}
	return new_vma;

out_vma_link:
	if (new_vma->vm_ops && new_vma->vm_ops->close)
		new_vma->vm_ops->close(new_vma);

	if (new_vma->vm_file)
		fput(new_vma->vm_file);

	unlink_anon_vmas(new_vma);
out_free_mempol:
	mpol_put(vma_policy(new_vma));
out_free_vma:
	vm_area_free(new_vma);
out:
	return NULL;
}

/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vmas. For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting. But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
	return a->vm_end == b->vm_start &&
		mpol_equal(vma_policy(a), vma_policy(b)) &&
		a->vm_file == b->vm_file &&
		!((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}
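
/*
 * For illustration (hypothetical values): two adjacent file-backed VMAs where
 * a is PROT_READ|PROT_WRITE and b is PROT_READ are still anon_vma-compatible,
 * because VM_WRITE falls within the masked-out VM_ACCESS_FLAGS; mprotect
 * could make the flags equal at any time. A differing VM_SHARED bit, by
 * contrast, is outside the mask and makes the VMAs incompatible. The final
 * pgoff check mirrors the merge checks earlier in this file: b must continue
 * the file mapping exactly where a ends.
 */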

/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mmap_lock held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_. But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically. But
 * we do that READ_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vma's are compatible (adjacent,
 * and with the same memory policies). That's all stable, even with just
 * a read lock on the mmap_lock.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
					  struct vm_area_struct *a,
					  struct vm_area_struct *b)
{
	if (anon_vma_compatible(a, b)) {
		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);

		if (anon_vma && list_is_singular(&old->anon_vma_chain))
			return anon_vma;
	}
	return NULL;
}

/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma. It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = NULL;
	struct vm_area_struct *prev, *next;
	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);

	/* Try next first. */
	next = vma_iter_load(&vmi);
	if (next) {
		anon_vma = reusable_anon_vma(next, vma, next);
		if (anon_vma)
			return anon_vma;
	}

	prev = vma_prev(&vmi);
	VM_BUG_ON_VMA(prev != vma, vma);
	prev = vma_prev(&vmi);
	/* Try prev next. */
	if (prev)
		anon_vma = reusable_anon_vma(prev, prev, vma);

	/*
	 * We might reach here with anon_vma == NULL if we can't find
	 * any reusable anon_vma.
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return anon_vma;
}

static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
{
	return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
}

static bool vma_is_shared_writable(struct vm_area_struct *vma)
{
	return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
		(VM_WRITE | VM_SHARED);
}

static bool vma_fs_can_writeback(struct vm_area_struct *vma)
{
	/* No managed pages to writeback. */
	if (vma->vm_flags & VM_PFNMAP)
		return false;

	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_can_writeback(vma->vm_file->f_mapping);
}

/*
 * Does this VMA require the underlying folios to have their dirty state
 * tracked?
 */
bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
{
	/* Only shared, writable VMAs require dirty tracking. */
	if (!vma_is_shared_writable(vma))
		return false;

	/* Does the filesystem need to be notified? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/*
	 * Even if the filesystem doesn't indicate a need for writenotify, if
	 * it can writeback, dirty tracking is still required.
	 */
	return vma_fs_can_writeback(vma);
}
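
/*
 * For illustration (hypothetical cases): a MAP_SHARED, PROT_WRITE mapping of
 * a regular file on a writeback-capable filesystem needs dirty tracking; a
 * MAP_PRIVATE mapping of the same file does not, since it fails the
 * shared-writable test; and a shared-writable VM_PFNMAP mapping does not,
 * provided its vm_ops request no write notification, as there are no managed
 * folios to write back.
 */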

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
	/* If it was private or non-writable, the write bit is already clear */
	if (!vma_is_shared_writable(vma))
		return false;

	/* The backer wishes to know when pages are first written to? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/*
	 * The open routine did something to the protections that
	 * pgprot_modify won't preserve?
	 */
	if (pgprot_val(vm_page_prot) !=
	    pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
		return false;

	/*
	 * Do we need to track softdirty? hugetlb does not support softdirty
	 * tracking yet.
	 */
	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
		return true;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_wp(vma))
		return true;

	/* Can the mapping track the dirty pages? */
	return vma_fs_can_writeback(vma);
}

static DEFINE_MUTEX(mm_all_locks_mutex);

static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
	if (!test_bit(0, (unsigned long *)&anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
		/*
		 * We can safely modify head.next after taking the
		 * anon_vma->root->rwsem. If some other vma in this mm shares
		 * the same anon_vma we won't take it again.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us thanks to the
		 * anon_vma->root->rwsem.
		 */
		if (__test_and_set_bit(0, (unsigned long *)
				       &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
	}
}

static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change from under us because
		 * we hold the mm_all_locks_mutex.
		 *
		 * Operations on ->flags have to be atomic because
		 * even if AS_MM_ALL_LOCKS is stable thanks to the
		 * mm_all_locks_mutex, there may be other cpus
		 * changing other bitflags in parallel to us.
		 */
		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
			BUG();
		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
	}
}

/*
 * This operation locks against the VM for all pte/vma/mm related
 * operations that could ever happen on a certain mm. This includes
 * vmtruncate, try_to_unmap, and all page faults.
 *
 * The caller must take the mmap_lock in write mode before calling
 * mm_take_all_locks(). The caller isn't allowed to release the
 * mmap_lock until mm_drop_all_locks() returns.
 *
 * mmap_lock in write mode is required in order to block all operations
 * that could modify pagetables and free pages without need of
 * altering the vma layout. It's also needed in write mode to prevent new
 * anon_vmas from being associated with existing vmas.
 *
 * A single task can't take more than one mm_take_all_locks() in a row
 * or it would deadlock.
 *
 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
 * mapping->flags avoid taking the same lock twice, if more than one
 * vma in this mm is backed by the same anon_vma or address_space.
 *
 * We take locks in the following order, according to the comment at the
 * beginning of mm/rmap.c:
 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
 *   hugetlb mapping);
 * - all vmas marked locked
 * - all i_mmap_rwsem locks;
 * - all anon_vma->rwsem
 *
 * We can take all locks within these types randomly because the VM code
 * doesn't nest them and we're protected from parallel mm_take_all_locks() by
 * mm_all_locks_mutex.
 *
 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
 * that may have to take thousands of locks.
 *
 * mm_take_all_locks() can fail if it's interrupted by signals.
 */
int mm_take_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_assert_write_locked(mm);

	mutex_lock(&mm_all_locks_mutex);

	/*
	 * vma_start_write() does not have a complement in mm_drop_all_locks()
	 * because vma_start_write() is always asymmetrical; it marks a VMA as
	 * being written to until mmap_write_unlock() or mmap_write_downgrade()
	 * is reached.
	 */
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		vma_start_write(vma);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
		    is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
		    !is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_lock_anon_vma(mm, avc->anon_vma);
	}

	return 0;

out_unlock:
	mm_drop_all_locks(mm);
	return -EINTR;
}
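
/*
 * A minimal sketch of the expected calling pattern, per the comment above
 * (further error handling elided):
 *
 *	mmap_write_lock(mm);
 *	err = mm_take_all_locks(mm);	// -EINTR if a signal is pending
 *	if (!err) {
 *		// ... operate on the fully locked mm ...
 *		mm_drop_all_locks(mm);
 *	}
 *	mmap_write_unlock(mm);
 */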

static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
	if (test_bit(0, (unsigned long *)&anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change to 0 from under
		 * us because we hold the mm_all_locks_mutex.
		 *
		 * We must however clear the bitflag before unlocking
		 * the vma so the users using the anon_vma->rb_root will
		 * never see our bitflag.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us until we release the
		 * anon_vma->root->rwsem.
		 */
		if (!__test_and_clear_bit(0, (unsigned long *)
					  &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
		anon_vma_unlock_write(anon_vma);
	}
}

static void vm_unlock_mapping(struct address_space *mapping)
{
	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change to 0 from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		i_mmap_unlock_write(mapping);
		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
					&mapping->flags))
			BUG();
	}
}

/*
 * The mmap_lock cannot be released by the caller until
 * mm_drop_all_locks() returns.
 */
void mm_drop_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_assert_write_locked(mm);
	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));

	for_each_vma(vmi, vma) {
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_unlock_anon_vma(avc->anon_vma);
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_unlock_mapping(vma->vm_file->f_mapping);
	}

	mutex_unlock(&mm_all_locks_mutex);
}