1 // SPDX-License-Identifier: GPL-2.0-or-later 2 3 /* 4 * VMA-specific functions. 5 */ 6 7 #include "vma_internal.h" 8 #include "vma.h" 9 10 static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next) 11 { 12 struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev; 13 14 if (!mpol_equal(vmg->policy, vma_policy(vma))) 15 return false; 16 /* 17 * VM_SOFTDIRTY should not prevent from VMA merging, if we 18 * match the flags but dirty bit -- the caller should mark 19 * merged VMA as dirty. If dirty bit won't be excluded from 20 * comparison, we increase pressure on the memory system forcing 21 * the kernel to generate new VMAs when old one could be 22 * extended instead. 23 */ 24 if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY) 25 return false; 26 if (vma->vm_file != vmg->file) 27 return false; 28 if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx)) 29 return false; 30 if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name)) 31 return false; 32 return true; 33 } 34 35 static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1, 36 struct anon_vma *anon_vma2, struct vm_area_struct *vma) 37 { 38 /* 39 * The list_is_singular() test is to avoid merging VMA cloned from 40 * parents. This can improve scalability caused by anon_vma lock. 41 */ 42 if ((!anon_vma1 || !anon_vma2) && (!vma || 43 list_is_singular(&vma->anon_vma_chain))) 44 return true; 45 return anon_vma1 == anon_vma2; 46 } 47 48 /* Are the anon_vma's belonging to each VMA compatible with one another? */ 49 static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1, 50 struct vm_area_struct *vma2) 51 { 52 return is_mergeable_anon_vma(vma1->anon_vma, vma2->anon_vma, NULL); 53 } 54 55 /* 56 * init_multi_vma_prep() - Initializer for struct vma_prepare 57 * @vp: The vma_prepare struct 58 * @vma: The vma that will be altered once locked 59 * @next: The next vma if it is to be adjusted 60 * @remove: The first vma to be removed 61 * @remove2: The second vma to be removed 62 */ 63 static void init_multi_vma_prep(struct vma_prepare *vp, 64 struct vm_area_struct *vma, 65 struct vm_area_struct *next, 66 struct vm_area_struct *remove, 67 struct vm_area_struct *remove2) 68 { 69 memset(vp, 0, sizeof(struct vma_prepare)); 70 vp->vma = vma; 71 vp->anon_vma = vma->anon_vma; 72 vp->remove = remove; 73 vp->remove2 = remove2; 74 vp->adj_next = next; 75 if (!vp->anon_vma && next) 76 vp->anon_vma = next->anon_vma; 77 78 vp->file = vma->vm_file; 79 if (vp->file) 80 vp->mapping = vma->vm_file->f_mapping; 81 82 } 83 84 /* 85 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) 86 * in front of (at a lower virtual address and file offset than) the vma. 87 * 88 * We cannot merge two vmas if they have differently assigned (non-NULL) 89 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. 90 * 91 * We don't check here for the merged mmap wrapping around the end of pagecache 92 * indices (16TB on ia32) because do_mmap() does not permit mmap's which 93 * wrap, nor mmaps which cover the final page at index -1UL. 94 * 95 * We assume the vma may be removed as part of the merge. 96 */ 97 static bool can_vma_merge_before(struct vma_merge_struct *vmg) 98 { 99 pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start); 100 101 if (is_mergeable_vma(vmg, /* merge_next = */ true) && 102 is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) { 103 if (vmg->next->vm_pgoff == vmg->pgoff + pglen) 104 return true; 105 } 106 107 return false; 108 } 109 110 /* 111 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) 112 * beyond (at a higher virtual address and file offset than) the vma. 113 * 114 * We cannot merge two vmas if they have differently assigned (non-NULL) 115 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. 116 * 117 * We assume that vma is not removed as part of the merge. 118 */ 119 static bool can_vma_merge_after(struct vma_merge_struct *vmg) 120 { 121 if (is_mergeable_vma(vmg, /* merge_next = */ false) && 122 is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) { 123 if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff) 124 return true; 125 } 126 return false; 127 } 128 129 static void __vma_link_file(struct vm_area_struct *vma, 130 struct address_space *mapping) 131 { 132 if (vma_is_shared_maywrite(vma)) 133 mapping_allow_writable(mapping); 134 135 flush_dcache_mmap_lock(mapping); 136 vma_interval_tree_insert(vma, &mapping->i_mmap); 137 flush_dcache_mmap_unlock(mapping); 138 } 139 140 /* 141 * Requires inode->i_mapping->i_mmap_rwsem 142 */ 143 static void __remove_shared_vm_struct(struct vm_area_struct *vma, 144 struct address_space *mapping) 145 { 146 if (vma_is_shared_maywrite(vma)) 147 mapping_unmap_writable(mapping); 148 149 flush_dcache_mmap_lock(mapping); 150 vma_interval_tree_remove(vma, &mapping->i_mmap); 151 flush_dcache_mmap_unlock(mapping); 152 } 153 154 /* 155 * vma_prepare() - Helper function for handling locking VMAs prior to altering 156 * @vp: The initialized vma_prepare struct 157 */ 158 static void vma_prepare(struct vma_prepare *vp) 159 { 160 if (vp->file) { 161 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end); 162 163 if (vp->adj_next) 164 uprobe_munmap(vp->adj_next, vp->adj_next->vm_start, 165 vp->adj_next->vm_end); 166 167 i_mmap_lock_write(vp->mapping); 168 if (vp->insert && vp->insert->vm_file) { 169 /* 170 * Put into interval tree now, so instantiated pages 171 * are visible to arm/parisc __flush_dcache_page 172 * throughout; but we cannot insert into address 173 * space until vma start or end is updated. 174 */ 175 __vma_link_file(vp->insert, 176 vp->insert->vm_file->f_mapping); 177 } 178 } 179 180 if (vp->anon_vma) { 181 anon_vma_lock_write(vp->anon_vma); 182 anon_vma_interval_tree_pre_update_vma(vp->vma); 183 if (vp->adj_next) 184 anon_vma_interval_tree_pre_update_vma(vp->adj_next); 185 } 186 187 if (vp->file) { 188 flush_dcache_mmap_lock(vp->mapping); 189 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap); 190 if (vp->adj_next) 191 vma_interval_tree_remove(vp->adj_next, 192 &vp->mapping->i_mmap); 193 } 194 195 } 196 197 /* 198 * vma_complete- Helper function for handling the unlocking after altering VMAs, 199 * or for inserting a VMA. 200 * 201 * @vp: The vma_prepare struct 202 * @vmi: The vma iterator 203 * @mm: The mm_struct 204 */ 205 static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi, 206 struct mm_struct *mm) 207 { 208 if (vp->file) { 209 if (vp->adj_next) 210 vma_interval_tree_insert(vp->adj_next, 211 &vp->mapping->i_mmap); 212 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap); 213 flush_dcache_mmap_unlock(vp->mapping); 214 } 215 216 if (vp->remove && vp->file) { 217 __remove_shared_vm_struct(vp->remove, vp->mapping); 218 if (vp->remove2) 219 __remove_shared_vm_struct(vp->remove2, vp->mapping); 220 } else if (vp->insert) { 221 /* 222 * split_vma has split insert from vma, and needs 223 * us to insert it before dropping the locks 224 * (it may either follow vma or precede it). 225 */ 226 vma_iter_store(vmi, vp->insert); 227 mm->map_count++; 228 } 229 230 if (vp->anon_vma) { 231 anon_vma_interval_tree_post_update_vma(vp->vma); 232 if (vp->adj_next) 233 anon_vma_interval_tree_post_update_vma(vp->adj_next); 234 anon_vma_unlock_write(vp->anon_vma); 235 } 236 237 if (vp->file) { 238 i_mmap_unlock_write(vp->mapping); 239 uprobe_mmap(vp->vma); 240 241 if (vp->adj_next) 242 uprobe_mmap(vp->adj_next); 243 } 244 245 if (vp->remove) { 246 again: 247 vma_mark_detached(vp->remove, true); 248 if (vp->file) { 249 uprobe_munmap(vp->remove, vp->remove->vm_start, 250 vp->remove->vm_end); 251 fput(vp->file); 252 } 253 if (vp->remove->anon_vma) 254 anon_vma_merge(vp->vma, vp->remove); 255 mm->map_count--; 256 mpol_put(vma_policy(vp->remove)); 257 if (!vp->remove2) 258 WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end); 259 vm_area_free(vp->remove); 260 261 /* 262 * In mprotect's case 6 (see comments on vma_merge), 263 * we are removing both mid and next vmas 264 */ 265 if (vp->remove2) { 266 vp->remove = vp->remove2; 267 vp->remove2 = NULL; 268 goto again; 269 } 270 } 271 if (vp->insert && vp->file) 272 uprobe_mmap(vp->insert); 273 } 274 275 /* 276 * init_vma_prep() - Initializer wrapper for vma_prepare struct 277 * @vp: The vma_prepare struct 278 * @vma: The vma that will be altered once locked 279 */ 280 static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma) 281 { 282 init_multi_vma_prep(vp, vma, NULL, NULL, NULL); 283 } 284 285 /* 286 * Can the proposed VMA be merged with the left (previous) VMA taking into 287 * account the start position of the proposed range. 288 */ 289 static bool can_vma_merge_left(struct vma_merge_struct *vmg) 290 291 { 292 return vmg->prev && vmg->prev->vm_end == vmg->start && 293 can_vma_merge_after(vmg); 294 } 295 296 /* 297 * Can the proposed VMA be merged with the right (next) VMA taking into 298 * account the end position of the proposed range. 299 * 300 * In addition, if we can merge with the left VMA, ensure that left and right 301 * anon_vma's are also compatible. 302 */ 303 static bool can_vma_merge_right(struct vma_merge_struct *vmg, 304 bool can_merge_left) 305 { 306 if (!vmg->next || vmg->end != vmg->next->vm_start || 307 !can_vma_merge_before(vmg)) 308 return false; 309 310 if (!can_merge_left) 311 return true; 312 313 /* 314 * If we can merge with prev (left) and next (right), indicating that 315 * each VMA's anon_vma is compatible with the proposed anon_vma, this 316 * does not mean prev and next are compatible with EACH OTHER. 317 * 318 * We therefore check this in addition to mergeability to either side. 319 */ 320 return are_anon_vmas_compatible(vmg->prev, vmg->next); 321 } 322 323 /* 324 * Close a vm structure and free it. 325 */ 326 void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed) 327 { 328 might_sleep(); 329 if (!closed && vma->vm_ops && vma->vm_ops->close) 330 vma->vm_ops->close(vma); 331 if (vma->vm_file) 332 fput(vma->vm_file); 333 mpol_put(vma_policy(vma)); 334 if (unreachable) 335 __vm_area_free(vma); 336 else 337 vm_area_free(vma); 338 } 339 340 /* 341 * Get rid of page table information in the indicated region. 342 * 343 * Called with the mm semaphore held. 344 */ 345 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma, 346 struct vm_area_struct *prev, struct vm_area_struct *next) 347 { 348 struct mm_struct *mm = vma->vm_mm; 349 struct mmu_gather tlb; 350 351 lru_add_drain(); 352 tlb_gather_mmu(&tlb, mm); 353 update_hiwater_rss(mm); 354 unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end, 355 /* mm_wr_locked = */ true); 356 mas_set(mas, vma->vm_end); 357 free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, 358 next ? next->vm_start : USER_PGTABLES_CEILING, 359 /* mm_wr_locked = */ true); 360 tlb_finish_mmu(&tlb); 361 } 362 363 /* 364 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it 365 * has already been checked or doesn't make sense to fail. 366 * VMA Iterator will point to the original VMA. 367 */ 368 static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, 369 unsigned long addr, int new_below) 370 { 371 struct vma_prepare vp; 372 struct vm_area_struct *new; 373 int err; 374 375 WARN_ON(vma->vm_start >= addr); 376 WARN_ON(vma->vm_end <= addr); 377 378 if (vma->vm_ops && vma->vm_ops->may_split) { 379 err = vma->vm_ops->may_split(vma, addr); 380 if (err) 381 return err; 382 } 383 384 new = vm_area_dup(vma); 385 if (!new) 386 return -ENOMEM; 387 388 if (new_below) { 389 new->vm_end = addr; 390 } else { 391 new->vm_start = addr; 392 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); 393 } 394 395 err = -ENOMEM; 396 vma_iter_config(vmi, new->vm_start, new->vm_end); 397 if (vma_iter_prealloc(vmi, new)) 398 goto out_free_vma; 399 400 err = vma_dup_policy(vma, new); 401 if (err) 402 goto out_free_vmi; 403 404 err = anon_vma_clone(new, vma); 405 if (err) 406 goto out_free_mpol; 407 408 if (new->vm_file) 409 get_file(new->vm_file); 410 411 if (new->vm_ops && new->vm_ops->open) 412 new->vm_ops->open(new); 413 414 vma_start_write(vma); 415 vma_start_write(new); 416 417 init_vma_prep(&vp, vma); 418 vp.insert = new; 419 vma_prepare(&vp); 420 vma_adjust_trans_huge(vma, vma->vm_start, addr, 0); 421 422 if (new_below) { 423 vma->vm_start = addr; 424 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT; 425 } else { 426 vma->vm_end = addr; 427 } 428 429 /* vma_complete stores the new vma */ 430 vma_complete(&vp, vmi, vma->vm_mm); 431 validate_mm(vma->vm_mm); 432 433 /* Success. */ 434 if (new_below) 435 vma_next(vmi); 436 else 437 vma_prev(vmi); 438 439 return 0; 440 441 out_free_mpol: 442 mpol_put(vma_policy(new)); 443 out_free_vmi: 444 vma_iter_free(vmi); 445 out_free_vma: 446 vm_area_free(new); 447 return err; 448 } 449 450 /* 451 * Split a vma into two pieces at address 'addr', a new vma is allocated 452 * either for the first part or the tail. 453 */ 454 static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, 455 unsigned long addr, int new_below) 456 { 457 if (vma->vm_mm->map_count >= sysctl_max_map_count) 458 return -ENOMEM; 459 460 return __split_vma(vmi, vma, addr, new_below); 461 } 462 463 /* 464 * vma has some anon_vma assigned, and is already inserted on that 465 * anon_vma's interval trees. 466 * 467 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the 468 * vma must be removed from the anon_vma's interval trees using 469 * anon_vma_interval_tree_pre_update_vma(). 470 * 471 * After the update, the vma will be reinserted using 472 * anon_vma_interval_tree_post_update_vma(). 473 * 474 * The entire update must be protected by exclusive mmap_lock and by 475 * the root anon_vma's mutex. 476 */ 477 void 478 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) 479 { 480 struct anon_vma_chain *avc; 481 482 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 483 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); 484 } 485 486 void 487 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) 488 { 489 struct anon_vma_chain *avc; 490 491 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 492 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root); 493 } 494 495 /* 496 * dup_anon_vma() - Helper function to duplicate anon_vma 497 * @dst: The destination VMA 498 * @src: The source VMA 499 * @dup: Pointer to the destination VMA when successful. 500 * 501 * Returns: 0 on success. 502 */ 503 static int dup_anon_vma(struct vm_area_struct *dst, 504 struct vm_area_struct *src, struct vm_area_struct **dup) 505 { 506 /* 507 * Easily overlooked: when mprotect shifts the boundary, make sure the 508 * expanding vma has anon_vma set if the shrinking vma had, to cover any 509 * anon pages imported. 510 */ 511 if (src->anon_vma && !dst->anon_vma) { 512 int ret; 513 514 vma_assert_write_locked(dst); 515 dst->anon_vma = src->anon_vma; 516 ret = anon_vma_clone(dst, src); 517 if (ret) 518 return ret; 519 520 *dup = dst; 521 } 522 523 return 0; 524 } 525 526 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE 527 void validate_mm(struct mm_struct *mm) 528 { 529 int bug = 0; 530 int i = 0; 531 struct vm_area_struct *vma; 532 VMA_ITERATOR(vmi, mm, 0); 533 534 mt_validate(&mm->mm_mt); 535 for_each_vma(vmi, vma) { 536 #ifdef CONFIG_DEBUG_VM_RB 537 struct anon_vma *anon_vma = vma->anon_vma; 538 struct anon_vma_chain *avc; 539 #endif 540 unsigned long vmi_start, vmi_end; 541 bool warn = 0; 542 543 vmi_start = vma_iter_addr(&vmi); 544 vmi_end = vma_iter_end(&vmi); 545 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm)) 546 warn = 1; 547 548 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm)) 549 warn = 1; 550 551 if (warn) { 552 pr_emerg("issue in %s\n", current->comm); 553 dump_stack(); 554 dump_vma(vma); 555 pr_emerg("tree range: %px start %lx end %lx\n", vma, 556 vmi_start, vmi_end - 1); 557 vma_iter_dump_tree(&vmi); 558 } 559 560 #ifdef CONFIG_DEBUG_VM_RB 561 if (anon_vma) { 562 anon_vma_lock_read(anon_vma); 563 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 564 anon_vma_interval_tree_verify(avc); 565 anon_vma_unlock_read(anon_vma); 566 } 567 #endif 568 i++; 569 } 570 if (i != mm->map_count) { 571 pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i); 572 bug = 1; 573 } 574 VM_BUG_ON_MM(bug, mm); 575 } 576 #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ 577 578 /* Actually perform the VMA merge operation. */ 579 static int commit_merge(struct vma_merge_struct *vmg, 580 struct vm_area_struct *adjust, 581 struct vm_area_struct *remove, 582 struct vm_area_struct *remove2, 583 long adj_start, 584 bool expanded) 585 { 586 struct vma_prepare vp; 587 588 init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2); 589 590 VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma && 591 vp.anon_vma != adjust->anon_vma); 592 593 if (expanded) { 594 /* Note: vma iterator must be pointing to 'start'. */ 595 vma_iter_config(vmg->vmi, vmg->start, vmg->end); 596 } else { 597 vma_iter_config(vmg->vmi, adjust->vm_start + adj_start, 598 adjust->vm_end); 599 } 600 601 if (vma_iter_prealloc(vmg->vmi, vmg->vma)) 602 return -ENOMEM; 603 604 vma_prepare(&vp); 605 vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start); 606 vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff); 607 608 if (expanded) 609 vma_iter_store(vmg->vmi, vmg->vma); 610 611 if (adj_start) { 612 adjust->vm_start += adj_start; 613 adjust->vm_pgoff += PHYS_PFN(adj_start); 614 if (adj_start < 0) { 615 WARN_ON(expanded); 616 vma_iter_store(vmg->vmi, adjust); 617 } 618 } 619 620 vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm); 621 622 return 0; 623 } 624 625 /* We can only remove VMAs when merging if they do not have a close hook. */ 626 static bool can_merge_remove_vma(struct vm_area_struct *vma) 627 { 628 return !vma->vm_ops || !vma->vm_ops->close; 629 } 630 631 /* 632 * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its 633 * attributes modified. 634 * 635 * @vmg: Describes the modifications being made to a VMA and associated 636 * metadata. 637 * 638 * When the attributes of a range within a VMA change, then it might be possible 639 * for immediately adjacent VMAs to be merged into that VMA due to having 640 * identical properties. 641 * 642 * This function checks for the existence of any such mergeable VMAs and updates 643 * the maple tree describing the @vmg->vma->vm_mm address space to account for 644 * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge. 645 * 646 * As part of this operation, if a merge occurs, the @vmg object will have its 647 * vma, start, end, and pgoff fields modified to execute the merge. Subsequent 648 * calls to this function should reset these fields. 649 * 650 * Returns: The merged VMA if merge succeeds, or NULL otherwise. 651 * 652 * ASSUMPTIONS: 653 * - The caller must assign the VMA to be modifed to @vmg->vma. 654 * - The caller must have set @vmg->prev to the previous VMA, if there is one. 655 * - The caller must not set @vmg->next, as we determine this. 656 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock. 657 * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end). 658 */ 659 static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *vmg) 660 { 661 struct vm_area_struct *vma = vmg->vma; 662 struct vm_area_struct *prev = vmg->prev; 663 struct vm_area_struct *next, *res; 664 struct vm_area_struct *anon_dup = NULL; 665 struct vm_area_struct *adjust = NULL; 666 unsigned long start = vmg->start; 667 unsigned long end = vmg->end; 668 bool left_side = vma && start == vma->vm_start; 669 bool right_side = vma && end == vma->vm_end; 670 int err = 0; 671 long adj_start = 0; 672 bool merge_will_delete_vma, merge_will_delete_next; 673 bool merge_left, merge_right, merge_both; 674 bool expanded; 675 676 mmap_assert_write_locked(vmg->mm); 677 VM_WARN_ON(!vma); /* We are modifying a VMA, so caller must specify. */ 678 VM_WARN_ON(vmg->next); /* We set this. */ 679 VM_WARN_ON(prev && start <= prev->vm_start); 680 VM_WARN_ON(start >= end); 681 /* 682 * If vma == prev, then we are offset into a VMA. Otherwise, if we are 683 * not, we must span a portion of the VMA. 684 */ 685 VM_WARN_ON(vma && ((vma != prev && vmg->start != vma->vm_start) || 686 vmg->end > vma->vm_end)); 687 /* The vmi must be positioned within vmg->vma. */ 688 VM_WARN_ON(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start && 689 vma_iter_addr(vmg->vmi) < vma->vm_end)); 690 691 vmg->state = VMA_MERGE_NOMERGE; 692 693 /* 694 * If a special mapping or if the range being modified is neither at the 695 * furthermost left or right side of the VMA, then we have no chance of 696 * merging and should abort. 697 */ 698 if (vmg->flags & VM_SPECIAL || (!left_side && !right_side)) 699 return NULL; 700 701 if (left_side) 702 merge_left = can_vma_merge_left(vmg); 703 else 704 merge_left = false; 705 706 if (right_side) { 707 next = vmg->next = vma_iter_next_range(vmg->vmi); 708 vma_iter_prev_range(vmg->vmi); 709 710 merge_right = can_vma_merge_right(vmg, merge_left); 711 } else { 712 merge_right = false; 713 next = NULL; 714 } 715 716 if (merge_left) /* If merging prev, position iterator there. */ 717 vma_prev(vmg->vmi); 718 else if (!merge_right) /* If we have nothing to merge, abort. */ 719 return NULL; 720 721 merge_both = merge_left && merge_right; 722 /* If we span the entire VMA, a merge implies it will be deleted. */ 723 merge_will_delete_vma = left_side && right_side; 724 725 /* 726 * If we need to remove vma in its entirety but are unable to do so, 727 * we have no sensible recourse but to abort the merge. 728 */ 729 if (merge_will_delete_vma && !can_merge_remove_vma(vma)) 730 return NULL; 731 732 /* 733 * If we merge both VMAs, then next is also deleted. This implies 734 * merge_will_delete_vma also. 735 */ 736 merge_will_delete_next = merge_both; 737 738 /* 739 * If we cannot delete next, then we can reduce the operation to merging 740 * prev and vma (thereby deleting vma). 741 */ 742 if (merge_will_delete_next && !can_merge_remove_vma(next)) { 743 merge_will_delete_next = false; 744 merge_right = false; 745 merge_both = false; 746 } 747 748 /* No matter what happens, we will be adjusting vma. */ 749 vma_start_write(vma); 750 751 if (merge_left) 752 vma_start_write(prev); 753 754 if (merge_right) 755 vma_start_write(next); 756 757 if (merge_both) { 758 /* 759 * |<----->| 760 * |-------*********-------| 761 * prev vma next 762 * extend delete delete 763 */ 764 765 vmg->vma = prev; 766 vmg->start = prev->vm_start; 767 vmg->end = next->vm_end; 768 vmg->pgoff = prev->vm_pgoff; 769 770 /* 771 * We already ensured anon_vma compatibility above, so now it's 772 * simply a case of, if prev has no anon_vma object, which of 773 * next or vma contains the anon_vma we must duplicate. 774 */ 775 err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup); 776 } else if (merge_left) { 777 /* 778 * |<----->| OR 779 * |<--------->| 780 * |-------************* 781 * prev vma 782 * extend shrink/delete 783 */ 784 785 vmg->vma = prev; 786 vmg->start = prev->vm_start; 787 vmg->pgoff = prev->vm_pgoff; 788 789 if (!merge_will_delete_vma) { 790 adjust = vma; 791 adj_start = vmg->end - vma->vm_start; 792 } 793 794 err = dup_anon_vma(prev, vma, &anon_dup); 795 } else { /* merge_right */ 796 /* 797 * |<----->| OR 798 * |<--------->| 799 * *************-------| 800 * vma next 801 * shrink/delete extend 802 */ 803 804 pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start); 805 806 VM_WARN_ON(!merge_right); 807 /* If we are offset into a VMA, then prev must be vma. */ 808 VM_WARN_ON(vmg->start > vma->vm_start && prev && vma != prev); 809 810 if (merge_will_delete_vma) { 811 vmg->vma = next; 812 vmg->end = next->vm_end; 813 vmg->pgoff = next->vm_pgoff - pglen; 814 } else { 815 /* 816 * We shrink vma and expand next. 817 * 818 * IMPORTANT: This is the ONLY case where the final 819 * merged VMA is NOT vmg->vma, but rather vmg->next. 820 */ 821 822 vmg->start = vma->vm_start; 823 vmg->end = start; 824 vmg->pgoff = vma->vm_pgoff; 825 826 adjust = next; 827 adj_start = -(vma->vm_end - start); 828 } 829 830 err = dup_anon_vma(next, vma, &anon_dup); 831 } 832 833 if (err) 834 goto abort; 835 836 /* 837 * In nearly all cases, we expand vmg->vma. There is one exception - 838 * merge_right where we partially span the VMA. In this case we shrink 839 * the end of vmg->vma and adjust the start of vmg->next accordingly. 840 */ 841 expanded = !merge_right || merge_will_delete_vma; 842 843 if (commit_merge(vmg, adjust, 844 merge_will_delete_vma ? vma : NULL, 845 merge_will_delete_next ? next : NULL, 846 adj_start, expanded)) { 847 if (anon_dup) 848 unlink_anon_vmas(anon_dup); 849 850 vmg->state = VMA_MERGE_ERROR_NOMEM; 851 return NULL; 852 } 853 854 res = merge_left ? prev : next; 855 khugepaged_enter_vma(res, vmg->flags); 856 857 vmg->state = VMA_MERGE_SUCCESS; 858 return res; 859 860 abort: 861 vma_iter_set(vmg->vmi, start); 862 vma_iter_load(vmg->vmi); 863 vmg->state = VMA_MERGE_ERROR_NOMEM; 864 return NULL; 865 } 866 867 /* 868 * vma_merge_new_range - Attempt to merge a new VMA into address space 869 * 870 * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end 871 * (exclusive), which we try to merge with any adjacent VMAs if possible. 872 * 873 * We are about to add a VMA to the address space starting at @vmg->start and 874 * ending at @vmg->end. There are three different possible scenarios: 875 * 876 * 1. There is a VMA with identical properties immediately adjacent to the 877 * proposed new VMA [@vmg->start, @vmg->end) either before or after it - 878 * EXPAND that VMA: 879 * 880 * Proposed: |-----| or |-----| 881 * Existing: |----| |----| 882 * 883 * 2. There are VMAs with identical properties immediately adjacent to the 884 * proposed new VMA [@vmg->start, @vmg->end) both before AND after it - 885 * EXPAND the former and REMOVE the latter: 886 * 887 * Proposed: |-----| 888 * Existing: |----| |----| 889 * 890 * 3. There are no VMAs immediately adjacent to the proposed new VMA or those 891 * VMAs do not have identical attributes - NO MERGE POSSIBLE. 892 * 893 * In instances where we can merge, this function returns the expanded VMA which 894 * will have its range adjusted accordingly and the underlying maple tree also 895 * adjusted. 896 * 897 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer 898 * to the VMA we expanded. 899 * 900 * This function adjusts @vmg to provide @vmg->next if not already specified, 901 * and adjusts [@vmg->start, @vmg->end) to span the expanded range. 902 * 903 * ASSUMPTIONS: 904 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock. 905 * - The caller must have determined that [@vmg->start, @vmg->end) is empty, 906 other than VMAs that will be unmapped should the operation succeed. 907 * - The caller must have specified the previous vma in @vmg->prev. 908 * - The caller must have specified the next vma in @vmg->next. 909 * - The caller must have positioned the vmi at or before the gap. 910 */ 911 struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) 912 { 913 struct vm_area_struct *prev = vmg->prev; 914 struct vm_area_struct *next = vmg->next; 915 unsigned long start = vmg->start; 916 unsigned long end = vmg->end; 917 pgoff_t pgoff = vmg->pgoff; 918 pgoff_t pglen = PHYS_PFN(end - start); 919 bool can_merge_left, can_merge_right; 920 921 mmap_assert_write_locked(vmg->mm); 922 VM_WARN_ON(vmg->vma); 923 /* vmi must point at or before the gap. */ 924 VM_WARN_ON(vma_iter_addr(vmg->vmi) > end); 925 926 vmg->state = VMA_MERGE_NOMERGE; 927 928 /* Special VMAs are unmergeable, also if no prev/next. */ 929 if ((vmg->flags & VM_SPECIAL) || (!prev && !next)) 930 return NULL; 931 932 can_merge_left = can_vma_merge_left(vmg); 933 can_merge_right = can_vma_merge_right(vmg, can_merge_left); 934 935 /* If we can merge with the next VMA, adjust vmg accordingly. */ 936 if (can_merge_right) { 937 vmg->end = next->vm_end; 938 vmg->vma = next; 939 vmg->pgoff = next->vm_pgoff - pglen; 940 } 941 942 /* If we can merge with the previous VMA, adjust vmg accordingly. */ 943 if (can_merge_left) { 944 vmg->start = prev->vm_start; 945 vmg->vma = prev; 946 vmg->pgoff = prev->vm_pgoff; 947 948 /* 949 * If this merge would result in removal of the next VMA but we 950 * are not permitted to do so, reduce the operation to merging 951 * prev and vma. 952 */ 953 if (can_merge_right && !can_merge_remove_vma(next)) 954 vmg->end = end; 955 956 vma_prev(vmg->vmi); /* Equivalent to going to the previous range */ 957 } 958 959 /* 960 * Now try to expand adjacent VMA(s). This takes care of removing the 961 * following VMA if we have VMAs on both sides. 962 */ 963 if (vmg->vma && !vma_expand(vmg)) { 964 khugepaged_enter_vma(vmg->vma, vmg->flags); 965 vmg->state = VMA_MERGE_SUCCESS; 966 return vmg->vma; 967 } 968 969 /* If expansion failed, reset state. Allows us to retry merge later. */ 970 vmg->vma = NULL; 971 vmg->start = start; 972 vmg->end = end; 973 vmg->pgoff = pgoff; 974 if (vmg->vma == prev) 975 vma_iter_set(vmg->vmi, start); 976 977 return NULL; 978 } 979 980 /* 981 * vma_expand - Expand an existing VMA 982 * 983 * @vmg: Describes a VMA expansion operation. 984 * 985 * Expand @vma to vmg->start and vmg->end. Can expand off the start and end. 986 * Will expand over vmg->next if it's different from vmg->vma and vmg->end == 987 * vmg->next->vm_end. Checking if the vmg->vma can expand and merge with 988 * vmg->next needs to be handled by the caller. 989 * 990 * Returns: 0 on success. 991 * 992 * ASSUMPTIONS: 993 * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock. 994 * - The caller must have set @vmg->vma and @vmg->next. 995 */ 996 int vma_expand(struct vma_merge_struct *vmg) 997 { 998 struct vm_area_struct *anon_dup = NULL; 999 bool remove_next = false; 1000 struct vm_area_struct *vma = vmg->vma; 1001 struct vm_area_struct *next = vmg->next; 1002 1003 mmap_assert_write_locked(vmg->mm); 1004 1005 vma_start_write(vma); 1006 if (next && (vma != next) && (vmg->end == next->vm_end)) { 1007 int ret; 1008 1009 remove_next = true; 1010 /* This should already have been checked by this point. */ 1011 VM_WARN_ON(!can_merge_remove_vma(next)); 1012 vma_start_write(next); 1013 ret = dup_anon_vma(vma, next, &anon_dup); 1014 if (ret) 1015 return ret; 1016 } 1017 1018 /* Not merging but overwriting any part of next is not handled. */ 1019 VM_WARN_ON(next && !remove_next && 1020 next != vma && vmg->end > next->vm_start); 1021 /* Only handles expanding */ 1022 VM_WARN_ON(vma->vm_start < vmg->start || vma->vm_end > vmg->end); 1023 1024 if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true)) 1025 goto nomem; 1026 1027 return 0; 1028 1029 nomem: 1030 vmg->state = VMA_MERGE_ERROR_NOMEM; 1031 if (anon_dup) 1032 unlink_anon_vmas(anon_dup); 1033 return -ENOMEM; 1034 } 1035 1036 /* 1037 * vma_shrink() - Reduce an existing VMAs memory area 1038 * @vmi: The vma iterator 1039 * @vma: The VMA to modify 1040 * @start: The new start 1041 * @end: The new end 1042 * 1043 * Returns: 0 on success, -ENOMEM otherwise 1044 */ 1045 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, 1046 unsigned long start, unsigned long end, pgoff_t pgoff) 1047 { 1048 struct vma_prepare vp; 1049 1050 WARN_ON((vma->vm_start != start) && (vma->vm_end != end)); 1051 1052 if (vma->vm_start < start) 1053 vma_iter_config(vmi, vma->vm_start, start); 1054 else 1055 vma_iter_config(vmi, end, vma->vm_end); 1056 1057 if (vma_iter_prealloc(vmi, NULL)) 1058 return -ENOMEM; 1059 1060 vma_start_write(vma); 1061 1062 init_vma_prep(&vp, vma); 1063 vma_prepare(&vp); 1064 vma_adjust_trans_huge(vma, start, end, 0); 1065 1066 vma_iter_clear(vmi); 1067 vma_set_range(vma, start, end, pgoff); 1068 vma_complete(&vp, vmi, vma->vm_mm); 1069 validate_mm(vma->vm_mm); 1070 return 0; 1071 } 1072 1073 static inline void vms_clear_ptes(struct vma_munmap_struct *vms, 1074 struct ma_state *mas_detach, bool mm_wr_locked) 1075 { 1076 struct mmu_gather tlb; 1077 1078 if (!vms->clear_ptes) /* Nothing to do */ 1079 return; 1080 1081 /* 1082 * We can free page tables without write-locking mmap_lock because VMAs 1083 * were isolated before we downgraded mmap_lock. 1084 */ 1085 mas_set(mas_detach, 1); 1086 lru_add_drain(); 1087 tlb_gather_mmu(&tlb, vms->vma->vm_mm); 1088 update_hiwater_rss(vms->vma->vm_mm); 1089 unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end, 1090 vms->vma_count, mm_wr_locked); 1091 1092 mas_set(mas_detach, 1); 1093 /* start and end may be different if there is no prev or next vma. */ 1094 free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start, 1095 vms->unmap_end, mm_wr_locked); 1096 tlb_finish_mmu(&tlb); 1097 vms->clear_ptes = false; 1098 } 1099 1100 void vms_clean_up_area(struct vma_munmap_struct *vms, 1101 struct ma_state *mas_detach) 1102 { 1103 struct vm_area_struct *vma; 1104 1105 if (!vms->nr_pages) 1106 return; 1107 1108 vms_clear_ptes(vms, mas_detach, true); 1109 mas_set(mas_detach, 0); 1110 mas_for_each(mas_detach, vma, ULONG_MAX) 1111 if (vma->vm_ops && vma->vm_ops->close) 1112 vma->vm_ops->close(vma); 1113 vms->closed_vm_ops = true; 1114 } 1115 1116 /* 1117 * vms_complete_munmap_vmas() - Finish the munmap() operation 1118 * @vms: The vma munmap struct 1119 * @mas_detach: The maple state of the detached vmas 1120 * 1121 * This updates the mm_struct, unmaps the region, frees the resources 1122 * used for the munmap() and may downgrade the lock - if requested. Everything 1123 * needed to be done once the vma maple tree is updated. 1124 */ 1125 void vms_complete_munmap_vmas(struct vma_munmap_struct *vms, 1126 struct ma_state *mas_detach) 1127 { 1128 struct vm_area_struct *vma; 1129 struct mm_struct *mm; 1130 1131 mm = current->mm; 1132 mm->map_count -= vms->vma_count; 1133 mm->locked_vm -= vms->locked_vm; 1134 if (vms->unlock) 1135 mmap_write_downgrade(mm); 1136 1137 if (!vms->nr_pages) 1138 return; 1139 1140 vms_clear_ptes(vms, mas_detach, !vms->unlock); 1141 /* Update high watermark before we lower total_vm */ 1142 update_hiwater_vm(mm); 1143 /* Stat accounting */ 1144 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages); 1145 /* Paranoid bookkeeping */ 1146 VM_WARN_ON(vms->exec_vm > mm->exec_vm); 1147 VM_WARN_ON(vms->stack_vm > mm->stack_vm); 1148 VM_WARN_ON(vms->data_vm > mm->data_vm); 1149 mm->exec_vm -= vms->exec_vm; 1150 mm->stack_vm -= vms->stack_vm; 1151 mm->data_vm -= vms->data_vm; 1152 1153 /* Remove and clean up vmas */ 1154 mas_set(mas_detach, 0); 1155 mas_for_each(mas_detach, vma, ULONG_MAX) 1156 remove_vma(vma, /* = */ false, vms->closed_vm_ops); 1157 1158 vm_unacct_memory(vms->nr_accounted); 1159 validate_mm(mm); 1160 if (vms->unlock) 1161 mmap_read_unlock(mm); 1162 1163 __mt_destroy(mas_detach->tree); 1164 } 1165 1166 /* 1167 * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree 1168 * for removal at a later date. Handles splitting first and last if necessary 1169 * and marking the vmas as isolated. 1170 * 1171 * @vms: The vma munmap struct 1172 * @mas_detach: The maple state tracking the detached tree 1173 * 1174 * Return: 0 on success, error otherwise 1175 */ 1176 int vms_gather_munmap_vmas(struct vma_munmap_struct *vms, 1177 struct ma_state *mas_detach) 1178 { 1179 struct vm_area_struct *next = NULL; 1180 int error; 1181 1182 /* 1183 * If we need to split any vma, do it now to save pain later. 1184 * Does it split the first one? 1185 */ 1186 if (vms->start > vms->vma->vm_start) { 1187 1188 /* 1189 * Make sure that map_count on return from munmap() will 1190 * not exceed its limit; but let map_count go just above 1191 * its limit temporarily, to help free resources as expected. 1192 */ 1193 if (vms->end < vms->vma->vm_end && 1194 vms->vma->vm_mm->map_count >= sysctl_max_map_count) { 1195 error = -ENOMEM; 1196 goto map_count_exceeded; 1197 } 1198 1199 /* Don't bother splitting the VMA if we can't unmap it anyway */ 1200 if (!can_modify_vma(vms->vma)) { 1201 error = -EPERM; 1202 goto start_split_failed; 1203 } 1204 1205 error = __split_vma(vms->vmi, vms->vma, vms->start, 1); 1206 if (error) 1207 goto start_split_failed; 1208 } 1209 vms->prev = vma_prev(vms->vmi); 1210 if (vms->prev) 1211 vms->unmap_start = vms->prev->vm_end; 1212 1213 /* 1214 * Detach a range of VMAs from the mm. Using next as a temp variable as 1215 * it is always overwritten. 1216 */ 1217 for_each_vma_range(*(vms->vmi), next, vms->end) { 1218 long nrpages; 1219 1220 if (!can_modify_vma(next)) { 1221 error = -EPERM; 1222 goto modify_vma_failed; 1223 } 1224 /* Does it split the end? */ 1225 if (next->vm_end > vms->end) { 1226 error = __split_vma(vms->vmi, next, vms->end, 0); 1227 if (error) 1228 goto end_split_failed; 1229 } 1230 vma_start_write(next); 1231 mas_set(mas_detach, vms->vma_count++); 1232 error = mas_store_gfp(mas_detach, next, GFP_KERNEL); 1233 if (error) 1234 goto munmap_gather_failed; 1235 1236 vma_mark_detached(next, true); 1237 nrpages = vma_pages(next); 1238 1239 vms->nr_pages += nrpages; 1240 if (next->vm_flags & VM_LOCKED) 1241 vms->locked_vm += nrpages; 1242 1243 if (next->vm_flags & VM_ACCOUNT) 1244 vms->nr_accounted += nrpages; 1245 1246 if (is_exec_mapping(next->vm_flags)) 1247 vms->exec_vm += nrpages; 1248 else if (is_stack_mapping(next->vm_flags)) 1249 vms->stack_vm += nrpages; 1250 else if (is_data_mapping(next->vm_flags)) 1251 vms->data_vm += nrpages; 1252 1253 if (unlikely(vms->uf)) { 1254 /* 1255 * If userfaultfd_unmap_prep returns an error the vmas 1256 * will remain split, but userland will get a 1257 * highly unexpected error anyway. This is no 1258 * different than the case where the first of the two 1259 * __split_vma fails, but we don't undo the first 1260 * split, despite we could. This is unlikely enough 1261 * failure that it's not worth optimizing it for. 1262 */ 1263 error = userfaultfd_unmap_prep(next, vms->start, 1264 vms->end, vms->uf); 1265 if (error) 1266 goto userfaultfd_error; 1267 } 1268 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE 1269 BUG_ON(next->vm_start < vms->start); 1270 BUG_ON(next->vm_start > vms->end); 1271 #endif 1272 } 1273 1274 vms->next = vma_next(vms->vmi); 1275 if (vms->next) 1276 vms->unmap_end = vms->next->vm_start; 1277 1278 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) 1279 /* Make sure no VMAs are about to be lost. */ 1280 { 1281 MA_STATE(test, mas_detach->tree, 0, 0); 1282 struct vm_area_struct *vma_mas, *vma_test; 1283 int test_count = 0; 1284 1285 vma_iter_set(vms->vmi, vms->start); 1286 rcu_read_lock(); 1287 vma_test = mas_find(&test, vms->vma_count - 1); 1288 for_each_vma_range(*(vms->vmi), vma_mas, vms->end) { 1289 BUG_ON(vma_mas != vma_test); 1290 test_count++; 1291 vma_test = mas_next(&test, vms->vma_count - 1); 1292 } 1293 rcu_read_unlock(); 1294 BUG_ON(vms->vma_count != test_count); 1295 } 1296 #endif 1297 1298 while (vma_iter_addr(vms->vmi) > vms->start) 1299 vma_iter_prev_range(vms->vmi); 1300 1301 vms->clear_ptes = true; 1302 return 0; 1303 1304 userfaultfd_error: 1305 munmap_gather_failed: 1306 end_split_failed: 1307 modify_vma_failed: 1308 reattach_vmas(mas_detach); 1309 start_split_failed: 1310 map_count_exceeded: 1311 return error; 1312 } 1313 1314 /* 1315 * do_vmi_align_munmap() - munmap the aligned region from @start to @end. 1316 * @vmi: The vma iterator 1317 * @vma: The starting vm_area_struct 1318 * @mm: The mm_struct 1319 * @start: The aligned start address to munmap. 1320 * @end: The aligned end address to munmap. 1321 * @uf: The userfaultfd list_head 1322 * @unlock: Set to true to drop the mmap_lock. unlocking only happens on 1323 * success. 1324 * 1325 * Return: 0 on success and drops the lock if so directed, error and leaves the 1326 * lock held otherwise. 1327 */ 1328 int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, 1329 struct mm_struct *mm, unsigned long start, unsigned long end, 1330 struct list_head *uf, bool unlock) 1331 { 1332 struct maple_tree mt_detach; 1333 MA_STATE(mas_detach, &mt_detach, 0, 0); 1334 mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK); 1335 mt_on_stack(mt_detach); 1336 struct vma_munmap_struct vms; 1337 int error; 1338 1339 init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock); 1340 error = vms_gather_munmap_vmas(&vms, &mas_detach); 1341 if (error) 1342 goto gather_failed; 1343 1344 error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL); 1345 if (error) 1346 goto clear_tree_failed; 1347 1348 /* Point of no return */ 1349 vms_complete_munmap_vmas(&vms, &mas_detach); 1350 return 0; 1351 1352 clear_tree_failed: 1353 reattach_vmas(&mas_detach); 1354 gather_failed: 1355 validate_mm(mm); 1356 return error; 1357 } 1358 1359 /* 1360 * do_vmi_munmap() - munmap a given range. 1361 * @vmi: The vma iterator 1362 * @mm: The mm_struct 1363 * @start: The start address to munmap 1364 * @len: The length of the range to munmap 1365 * @uf: The userfaultfd list_head 1366 * @unlock: set to true if the user wants to drop the mmap_lock on success 1367 * 1368 * This function takes a @mas that is either pointing to the previous VMA or set 1369 * to MA_START and sets it up to remove the mapping(s). The @len will be 1370 * aligned. 1371 * 1372 * Return: 0 on success and drops the lock if so directed, error and leaves the 1373 * lock held otherwise. 1374 */ 1375 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, 1376 unsigned long start, size_t len, struct list_head *uf, 1377 bool unlock) 1378 { 1379 unsigned long end; 1380 struct vm_area_struct *vma; 1381 1382 if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) 1383 return -EINVAL; 1384 1385 end = start + PAGE_ALIGN(len); 1386 if (end == start) 1387 return -EINVAL; 1388 1389 /* Find the first overlapping VMA */ 1390 vma = vma_find(vmi, end); 1391 if (!vma) { 1392 if (unlock) 1393 mmap_write_unlock(mm); 1394 return 0; 1395 } 1396 1397 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); 1398 } 1399 1400 /* 1401 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd 1402 * context and anonymous VMA name within the range [start, end). 1403 * 1404 * As a result, we might be able to merge the newly modified VMA range with an 1405 * adjacent VMA with identical properties. 1406 * 1407 * If no merge is possible and the range does not span the entirety of the VMA, 1408 * we then need to split the VMA to accommodate the change. 1409 * 1410 * The function returns either the merged VMA, the original VMA if a split was 1411 * required instead, or an error if the split failed. 1412 */ 1413 static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg) 1414 { 1415 struct vm_area_struct *vma = vmg->vma; 1416 struct vm_area_struct *merged; 1417 1418 /* First, try to merge. */ 1419 merged = vma_merge_existing_range(vmg); 1420 if (merged) 1421 return merged; 1422 1423 /* Split any preceding portion of the VMA. */ 1424 if (vma->vm_start < vmg->start) { 1425 int err = split_vma(vmg->vmi, vma, vmg->start, 1); 1426 1427 if (err) 1428 return ERR_PTR(err); 1429 } 1430 1431 /* Split any trailing portion of the VMA. */ 1432 if (vma->vm_end > vmg->end) { 1433 int err = split_vma(vmg->vmi, vma, vmg->end, 0); 1434 1435 if (err) 1436 return ERR_PTR(err); 1437 } 1438 1439 return vma; 1440 } 1441 1442 struct vm_area_struct *vma_modify_flags( 1443 struct vma_iterator *vmi, struct vm_area_struct *prev, 1444 struct vm_area_struct *vma, unsigned long start, unsigned long end, 1445 unsigned long new_flags) 1446 { 1447 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); 1448 1449 vmg.flags = new_flags; 1450 1451 return vma_modify(&vmg); 1452 } 1453 1454 struct vm_area_struct 1455 *vma_modify_flags_name(struct vma_iterator *vmi, 1456 struct vm_area_struct *prev, 1457 struct vm_area_struct *vma, 1458 unsigned long start, 1459 unsigned long end, 1460 unsigned long new_flags, 1461 struct anon_vma_name *new_name) 1462 { 1463 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); 1464 1465 vmg.flags = new_flags; 1466 vmg.anon_name = new_name; 1467 1468 return vma_modify(&vmg); 1469 } 1470 1471 struct vm_area_struct 1472 *vma_modify_policy(struct vma_iterator *vmi, 1473 struct vm_area_struct *prev, 1474 struct vm_area_struct *vma, 1475 unsigned long start, unsigned long end, 1476 struct mempolicy *new_pol) 1477 { 1478 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); 1479 1480 vmg.policy = new_pol; 1481 1482 return vma_modify(&vmg); 1483 } 1484 1485 struct vm_area_struct 1486 *vma_modify_flags_uffd(struct vma_iterator *vmi, 1487 struct vm_area_struct *prev, 1488 struct vm_area_struct *vma, 1489 unsigned long start, unsigned long end, 1490 unsigned long new_flags, 1491 struct vm_userfaultfd_ctx new_ctx) 1492 { 1493 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); 1494 1495 vmg.flags = new_flags; 1496 vmg.uffd_ctx = new_ctx; 1497 1498 return vma_modify(&vmg); 1499 } 1500 1501 /* 1502 * Expand vma by delta bytes, potentially merging with an immediately adjacent 1503 * VMA with identical properties. 1504 */ 1505 struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi, 1506 struct vm_area_struct *vma, 1507 unsigned long delta) 1508 { 1509 VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta); 1510 1511 vmg.next = vma_iter_next_rewind(vmi, NULL); 1512 vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */ 1513 1514 return vma_merge_new_range(&vmg); 1515 } 1516 1517 void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb) 1518 { 1519 vb->count = 0; 1520 } 1521 1522 static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb) 1523 { 1524 struct address_space *mapping; 1525 int i; 1526 1527 mapping = vb->vmas[0]->vm_file->f_mapping; 1528 i_mmap_lock_write(mapping); 1529 for (i = 0; i < vb->count; i++) { 1530 VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping); 1531 __remove_shared_vm_struct(vb->vmas[i], mapping); 1532 } 1533 i_mmap_unlock_write(mapping); 1534 1535 unlink_file_vma_batch_init(vb); 1536 } 1537 1538 void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb, 1539 struct vm_area_struct *vma) 1540 { 1541 if (vma->vm_file == NULL) 1542 return; 1543 1544 if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) || 1545 vb->count == ARRAY_SIZE(vb->vmas)) 1546 unlink_file_vma_batch_process(vb); 1547 1548 vb->vmas[vb->count] = vma; 1549 vb->count++; 1550 } 1551 1552 void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb) 1553 { 1554 if (vb->count > 0) 1555 unlink_file_vma_batch_process(vb); 1556 } 1557 1558 /* 1559 * Unlink a file-based vm structure from its interval tree, to hide 1560 * vma from rmap and vmtruncate before freeing its page tables. 1561 */ 1562 void unlink_file_vma(struct vm_area_struct *vma) 1563 { 1564 struct file *file = vma->vm_file; 1565 1566 if (file) { 1567 struct address_space *mapping = file->f_mapping; 1568 1569 i_mmap_lock_write(mapping); 1570 __remove_shared_vm_struct(vma, mapping); 1571 i_mmap_unlock_write(mapping); 1572 } 1573 } 1574 1575 void vma_link_file(struct vm_area_struct *vma) 1576 { 1577 struct file *file = vma->vm_file; 1578 struct address_space *mapping; 1579 1580 if (file) { 1581 mapping = file->f_mapping; 1582 i_mmap_lock_write(mapping); 1583 __vma_link_file(vma, mapping); 1584 i_mmap_unlock_write(mapping); 1585 } 1586 } 1587 1588 int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) 1589 { 1590 VMA_ITERATOR(vmi, mm, 0); 1591 1592 vma_iter_config(&vmi, vma->vm_start, vma->vm_end); 1593 if (vma_iter_prealloc(&vmi, vma)) 1594 return -ENOMEM; 1595 1596 vma_start_write(vma); 1597 vma_iter_store(&vmi, vma); 1598 vma_link_file(vma); 1599 mm->map_count++; 1600 validate_mm(mm); 1601 return 0; 1602 } 1603 1604 /* 1605 * Copy the vma structure to a new location in the same mm, 1606 * prior to moving page table entries, to effect an mremap move. 1607 */ 1608 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, 1609 unsigned long addr, unsigned long len, pgoff_t pgoff, 1610 bool *need_rmap_locks) 1611 { 1612 struct vm_area_struct *vma = *vmap; 1613 unsigned long vma_start = vma->vm_start; 1614 struct mm_struct *mm = vma->vm_mm; 1615 struct vm_area_struct *new_vma; 1616 bool faulted_in_anon_vma = true; 1617 VMA_ITERATOR(vmi, mm, addr); 1618 VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len); 1619 1620 /* 1621 * If anonymous vma has not yet been faulted, update new pgoff 1622 * to match new location, to increase its chance of merging. 1623 */ 1624 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { 1625 pgoff = addr >> PAGE_SHIFT; 1626 faulted_in_anon_vma = false; 1627 } 1628 1629 new_vma = find_vma_prev(mm, addr, &vmg.prev); 1630 if (new_vma && new_vma->vm_start < addr + len) 1631 return NULL; /* should never get here */ 1632 1633 vmg.vma = NULL; /* New VMA range. */ 1634 vmg.pgoff = pgoff; 1635 vmg.next = vma_iter_next_rewind(&vmi, NULL); 1636 new_vma = vma_merge_new_range(&vmg); 1637 1638 if (new_vma) { 1639 /* 1640 * Source vma may have been merged into new_vma 1641 */ 1642 if (unlikely(vma_start >= new_vma->vm_start && 1643 vma_start < new_vma->vm_end)) { 1644 /* 1645 * The only way we can get a vma_merge with 1646 * self during an mremap is if the vma hasn't 1647 * been faulted in yet and we were allowed to 1648 * reset the dst vma->vm_pgoff to the 1649 * destination address of the mremap to allow 1650 * the merge to happen. mremap must change the 1651 * vm_pgoff linearity between src and dst vmas 1652 * (in turn preventing a vma_merge) to be 1653 * safe. It is only safe to keep the vm_pgoff 1654 * linear if there are no pages mapped yet. 1655 */ 1656 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); 1657 *vmap = vma = new_vma; 1658 } 1659 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); 1660 } else { 1661 new_vma = vm_area_dup(vma); 1662 if (!new_vma) 1663 goto out; 1664 vma_set_range(new_vma, addr, addr + len, pgoff); 1665 if (vma_dup_policy(vma, new_vma)) 1666 goto out_free_vma; 1667 if (anon_vma_clone(new_vma, vma)) 1668 goto out_free_mempol; 1669 if (new_vma->vm_file) 1670 get_file(new_vma->vm_file); 1671 if (new_vma->vm_ops && new_vma->vm_ops->open) 1672 new_vma->vm_ops->open(new_vma); 1673 if (vma_link(mm, new_vma)) 1674 goto out_vma_link; 1675 *need_rmap_locks = false; 1676 } 1677 return new_vma; 1678 1679 out_vma_link: 1680 if (new_vma->vm_ops && new_vma->vm_ops->close) 1681 new_vma->vm_ops->close(new_vma); 1682 1683 if (new_vma->vm_file) 1684 fput(new_vma->vm_file); 1685 1686 unlink_anon_vmas(new_vma); 1687 out_free_mempol: 1688 mpol_put(vma_policy(new_vma)); 1689 out_free_vma: 1690 vm_area_free(new_vma); 1691 out: 1692 return NULL; 1693 } 1694 1695 /* 1696 * Rough compatibility check to quickly see if it's even worth looking 1697 * at sharing an anon_vma. 1698 * 1699 * They need to have the same vm_file, and the flags can only differ 1700 * in things that mprotect may change. 1701 * 1702 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that 1703 * we can merge the two vma's. For example, we refuse to merge a vma if 1704 * there is a vm_ops->close() function, because that indicates that the 1705 * driver is doing some kind of reference counting. But that doesn't 1706 * really matter for the anon_vma sharing case. 1707 */ 1708 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) 1709 { 1710 return a->vm_end == b->vm_start && 1711 mpol_equal(vma_policy(a), vma_policy(b)) && 1712 a->vm_file == b->vm_file && 1713 !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) && 1714 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); 1715 } 1716 1717 /* 1718 * Do some basic sanity checking to see if we can re-use the anon_vma 1719 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be 1720 * the same as 'old', the other will be the new one that is trying 1721 * to share the anon_vma. 1722 * 1723 * NOTE! This runs with mmap_lock held for reading, so it is possible that 1724 * the anon_vma of 'old' is concurrently in the process of being set up 1725 * by another page fault trying to merge _that_. But that's ok: if it 1726 * is being set up, that automatically means that it will be a singleton 1727 * acceptable for merging, so we can do all of this optimistically. But 1728 * we do that READ_ONCE() to make sure that we never re-load the pointer. 1729 * 1730 * IOW: that the "list_is_singular()" test on the anon_vma_chain only 1731 * matters for the 'stable anon_vma' case (ie the thing we want to avoid 1732 * is to return an anon_vma that is "complex" due to having gone through 1733 * a fork). 1734 * 1735 * We also make sure that the two vma's are compatible (adjacent, 1736 * and with the same memory policies). That's all stable, even with just 1737 * a read lock on the mmap_lock. 1738 */ 1739 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, 1740 struct vm_area_struct *a, 1741 struct vm_area_struct *b) 1742 { 1743 if (anon_vma_compatible(a, b)) { 1744 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); 1745 1746 if (anon_vma && list_is_singular(&old->anon_vma_chain)) 1747 return anon_vma; 1748 } 1749 return NULL; 1750 } 1751 1752 /* 1753 * find_mergeable_anon_vma is used by anon_vma_prepare, to check 1754 * neighbouring vmas for a suitable anon_vma, before it goes off 1755 * to allocate a new anon_vma. It checks because a repetitive 1756 * sequence of mprotects and faults may otherwise lead to distinct 1757 * anon_vmas being allocated, preventing vma merge in subsequent 1758 * mprotect. 1759 */ 1760 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) 1761 { 1762 struct anon_vma *anon_vma = NULL; 1763 struct vm_area_struct *prev, *next; 1764 VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end); 1765 1766 /* Try next first. */ 1767 next = vma_iter_load(&vmi); 1768 if (next) { 1769 anon_vma = reusable_anon_vma(next, vma, next); 1770 if (anon_vma) 1771 return anon_vma; 1772 } 1773 1774 prev = vma_prev(&vmi); 1775 VM_BUG_ON_VMA(prev != vma, vma); 1776 prev = vma_prev(&vmi); 1777 /* Try prev next. */ 1778 if (prev) 1779 anon_vma = reusable_anon_vma(prev, prev, vma); 1780 1781 /* 1782 * We might reach here with anon_vma == NULL if we can't find 1783 * any reusable anon_vma. 1784 * There's no absolute need to look only at touching neighbours: 1785 * we could search further afield for "compatible" anon_vmas. 1786 * But it would probably just be a waste of time searching, 1787 * or lead to too many vmas hanging off the same anon_vma. 1788 * We're trying to allow mprotect remerging later on, 1789 * not trying to minimize memory used for anon_vmas. 1790 */ 1791 return anon_vma; 1792 } 1793 1794 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops) 1795 { 1796 return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite); 1797 } 1798 1799 static bool vma_is_shared_writable(struct vm_area_struct *vma) 1800 { 1801 return (vma->vm_flags & (VM_WRITE | VM_SHARED)) == 1802 (VM_WRITE | VM_SHARED); 1803 } 1804 1805 static bool vma_fs_can_writeback(struct vm_area_struct *vma) 1806 { 1807 /* No managed pages to writeback. */ 1808 if (vma->vm_flags & VM_PFNMAP) 1809 return false; 1810 1811 return vma->vm_file && vma->vm_file->f_mapping && 1812 mapping_can_writeback(vma->vm_file->f_mapping); 1813 } 1814 1815 /* 1816 * Does this VMA require the underlying folios to have their dirty state 1817 * tracked? 1818 */ 1819 bool vma_needs_dirty_tracking(struct vm_area_struct *vma) 1820 { 1821 /* Only shared, writable VMAs require dirty tracking. */ 1822 if (!vma_is_shared_writable(vma)) 1823 return false; 1824 1825 /* Does the filesystem need to be notified? */ 1826 if (vm_ops_needs_writenotify(vma->vm_ops)) 1827 return true; 1828 1829 /* 1830 * Even if the filesystem doesn't indicate a need for writenotify, if it 1831 * can writeback, dirty tracking is still required. 1832 */ 1833 return vma_fs_can_writeback(vma); 1834 } 1835 1836 /* 1837 * Some shared mappings will want the pages marked read-only 1838 * to track write events. If so, we'll downgrade vm_page_prot 1839 * to the private version (using protection_map[] without the 1840 * VM_SHARED bit). 1841 */ 1842 bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) 1843 { 1844 /* If it was private or non-writable, the write bit is already clear */ 1845 if (!vma_is_shared_writable(vma)) 1846 return false; 1847 1848 /* The backer wishes to know when pages are first written to? */ 1849 if (vm_ops_needs_writenotify(vma->vm_ops)) 1850 return true; 1851 1852 /* The open routine did something to the protections that pgprot_modify 1853 * won't preserve? */ 1854 if (pgprot_val(vm_page_prot) != 1855 pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags))) 1856 return false; 1857 1858 /* 1859 * Do we need to track softdirty? hugetlb does not support softdirty 1860 * tracking yet. 1861 */ 1862 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma)) 1863 return true; 1864 1865 /* Do we need write faults for uffd-wp tracking? */ 1866 if (userfaultfd_wp(vma)) 1867 return true; 1868 1869 /* Can the mapping track the dirty pages? */ 1870 return vma_fs_can_writeback(vma); 1871 } 1872 1873 static DEFINE_MUTEX(mm_all_locks_mutex); 1874 1875 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) 1876 { 1877 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { 1878 /* 1879 * The LSB of head.next can't change from under us 1880 * because we hold the mm_all_locks_mutex. 1881 */ 1882 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); 1883 /* 1884 * We can safely modify head.next after taking the 1885 * anon_vma->root->rwsem. If some other vma in this mm shares 1886 * the same anon_vma we won't take it again. 1887 * 1888 * No need of atomic instructions here, head.next 1889 * can't change from under us thanks to the 1890 * anon_vma->root->rwsem. 1891 */ 1892 if (__test_and_set_bit(0, (unsigned long *) 1893 &anon_vma->root->rb_root.rb_root.rb_node)) 1894 BUG(); 1895 } 1896 } 1897 1898 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) 1899 { 1900 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { 1901 /* 1902 * AS_MM_ALL_LOCKS can't change from under us because 1903 * we hold the mm_all_locks_mutex. 1904 * 1905 * Operations on ->flags have to be atomic because 1906 * even if AS_MM_ALL_LOCKS is stable thanks to the 1907 * mm_all_locks_mutex, there may be other cpus 1908 * changing other bitflags in parallel to us. 1909 */ 1910 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) 1911 BUG(); 1912 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); 1913 } 1914 } 1915 1916 /* 1917 * This operation locks against the VM for all pte/vma/mm related 1918 * operations that could ever happen on a certain mm. This includes 1919 * vmtruncate, try_to_unmap, and all page faults. 1920 * 1921 * The caller must take the mmap_lock in write mode before calling 1922 * mm_take_all_locks(). The caller isn't allowed to release the 1923 * mmap_lock until mm_drop_all_locks() returns. 1924 * 1925 * mmap_lock in write mode is required in order to block all operations 1926 * that could modify pagetables and free pages without need of 1927 * altering the vma layout. It's also needed in write mode to avoid new 1928 * anon_vmas to be associated with existing vmas. 1929 * 1930 * A single task can't take more than one mm_take_all_locks() in a row 1931 * or it would deadlock. 1932 * 1933 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in 1934 * mapping->flags avoid to take the same lock twice, if more than one 1935 * vma in this mm is backed by the same anon_vma or address_space. 1936 * 1937 * We take locks in following order, accordingly to comment at beginning 1938 * of mm/rmap.c: 1939 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for 1940 * hugetlb mapping); 1941 * - all vmas marked locked 1942 * - all i_mmap_rwsem locks; 1943 * - all anon_vma->rwseml 1944 * 1945 * We can take all locks within these types randomly because the VM code 1946 * doesn't nest them and we protected from parallel mm_take_all_locks() by 1947 * mm_all_locks_mutex. 1948 * 1949 * mm_take_all_locks() and mm_drop_all_locks are expensive operations 1950 * that may have to take thousand of locks. 1951 * 1952 * mm_take_all_locks() can fail if it's interrupted by signals. 1953 */ 1954 int mm_take_all_locks(struct mm_struct *mm) 1955 { 1956 struct vm_area_struct *vma; 1957 struct anon_vma_chain *avc; 1958 VMA_ITERATOR(vmi, mm, 0); 1959 1960 mmap_assert_write_locked(mm); 1961 1962 mutex_lock(&mm_all_locks_mutex); 1963 1964 /* 1965 * vma_start_write() does not have a complement in mm_drop_all_locks() 1966 * because vma_start_write() is always asymmetrical; it marks a VMA as 1967 * being written to until mmap_write_unlock() or mmap_write_downgrade() 1968 * is reached. 1969 */ 1970 for_each_vma(vmi, vma) { 1971 if (signal_pending(current)) 1972 goto out_unlock; 1973 vma_start_write(vma); 1974 } 1975 1976 vma_iter_init(&vmi, mm, 0); 1977 for_each_vma(vmi, vma) { 1978 if (signal_pending(current)) 1979 goto out_unlock; 1980 if (vma->vm_file && vma->vm_file->f_mapping && 1981 is_vm_hugetlb_page(vma)) 1982 vm_lock_mapping(mm, vma->vm_file->f_mapping); 1983 } 1984 1985 vma_iter_init(&vmi, mm, 0); 1986 for_each_vma(vmi, vma) { 1987 if (signal_pending(current)) 1988 goto out_unlock; 1989 if (vma->vm_file && vma->vm_file->f_mapping && 1990 !is_vm_hugetlb_page(vma)) 1991 vm_lock_mapping(mm, vma->vm_file->f_mapping); 1992 } 1993 1994 vma_iter_init(&vmi, mm, 0); 1995 for_each_vma(vmi, vma) { 1996 if (signal_pending(current)) 1997 goto out_unlock; 1998 if (vma->anon_vma) 1999 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 2000 vm_lock_anon_vma(mm, avc->anon_vma); 2001 } 2002 2003 return 0; 2004 2005 out_unlock: 2006 mm_drop_all_locks(mm); 2007 return -EINTR; 2008 } 2009 2010 static void vm_unlock_anon_vma(struct anon_vma *anon_vma) 2011 { 2012 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { 2013 /* 2014 * The LSB of head.next can't change to 0 from under 2015 * us because we hold the mm_all_locks_mutex. 2016 * 2017 * We must however clear the bitflag before unlocking 2018 * the vma so the users using the anon_vma->rb_root will 2019 * never see our bitflag. 2020 * 2021 * No need of atomic instructions here, head.next 2022 * can't change from under us until we release the 2023 * anon_vma->root->rwsem. 2024 */ 2025 if (!__test_and_clear_bit(0, (unsigned long *) 2026 &anon_vma->root->rb_root.rb_root.rb_node)) 2027 BUG(); 2028 anon_vma_unlock_write(anon_vma); 2029 } 2030 } 2031 2032 static void vm_unlock_mapping(struct address_space *mapping) 2033 { 2034 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { 2035 /* 2036 * AS_MM_ALL_LOCKS can't change to 0 from under us 2037 * because we hold the mm_all_locks_mutex. 2038 */ 2039 i_mmap_unlock_write(mapping); 2040 if (!test_and_clear_bit(AS_MM_ALL_LOCKS, 2041 &mapping->flags)) 2042 BUG(); 2043 } 2044 } 2045 2046 /* 2047 * The mmap_lock cannot be released by the caller until 2048 * mm_drop_all_locks() returns. 2049 */ 2050 void mm_drop_all_locks(struct mm_struct *mm) 2051 { 2052 struct vm_area_struct *vma; 2053 struct anon_vma_chain *avc; 2054 VMA_ITERATOR(vmi, mm, 0); 2055 2056 mmap_assert_write_locked(mm); 2057 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex)); 2058 2059 for_each_vma(vmi, vma) { 2060 if (vma->anon_vma) 2061 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 2062 vm_unlock_anon_vma(avc->anon_vma); 2063 if (vma->vm_file && vma->vm_file->f_mapping) 2064 vm_unlock_mapping(vma->vm_file->f_mapping); 2065 } 2066 2067 mutex_unlock(&mm_all_locks_mutex); 2068 } 2069