// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * VMA-specific functions.
 */

#include "vma_internal.h"
#include "vma.h"

/*
 * If the vma has a ->close operation then the driver probably needs to release
 * per-vma resources, so we don't attempt to merge those if the caller indicates
 * the current vma may be removed as part of the merge.
 */
static inline bool is_mergeable_vma(struct vm_area_struct *vma,
		struct file *file, unsigned long vm_flags,
		struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		struct anon_vma_name *anon_name, bool may_remove_vma)
{
	/*
	 * VM_SOFTDIRTY should not prevent VMA merging if we match the flags
	 * but differ in the dirty bit -- the caller should mark the merged
	 * VMA as dirty.  If the dirty bit were not excluded from the
	 * comparison, we would increase pressure on the memory system by
	 * forcing the kernel to generate new VMAs where old ones could have
	 * been extended instead.
	 */
	if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
		return false;
	if (vma->vm_file != file)
		return false;
	if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
		return false;
	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
		return false;
	if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
		return false;
	return true;
}

static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
		struct anon_vma *anon_vma2, struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test avoids merging VMAs cloned from a
	 * parent: such a merge would increase contention on the shared
	 * anon_vma lock, hurting scalability.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
	    list_is_singular(&vma->anon_vma_chain)))
		return true;
	return anon_vma1 == anon_vma2;
}

/*
 * init_multi_vma_prep() - Initializer for struct vma_prepare
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 * @next: The next vma if it is to be adjusted
 * @remove: The first vma to be removed
 * @remove2: The second vma to be removed
 */
static void init_multi_vma_prep(struct vma_prepare *vp,
				struct vm_area_struct *vma,
				struct vm_area_struct *next,
				struct vm_area_struct *remove,
				struct vm_area_struct *remove2)
{
	memset(vp, 0, sizeof(struct vma_prepare));
	vp->vma = vma;
	vp->anon_vma = vma->anon_vma;
	vp->remove = remove;
	vp->remove2 = remove2;
	vp->adj_next = next;
	if (!vp->anon_vma && next)
		vp->anon_vma = next->anon_vma;

	vp->file = vma->vm_file;
	if (vp->file)
		vp->mapping = vma->vm_file->f_mapping;
}
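/*
 * Illustrative sketch (not part of the original source): the XOR test in
 * is_mergeable_vma() treats two flag sets as equal when they differ only in
 * VM_SOFTDIRTY.  For example:
 *
 *	unsigned long a = VM_READ | VM_WRITE | VM_SOFTDIRTY;
 *	unsigned long b = VM_READ | VM_WRITE;
 *
 * (a ^ b) & ~VM_SOFTDIRTY == 0, so the flags are considered mergeable;
 * adding VM_SHARED to only one of them would make the test non-zero and
 * veto the merge.
 */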
/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if the same anon_vma is assigned but the offsets are
 * incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap() does not permit mmaps which wrap,
 * nor mmaps which cover the final page at index -1UL.
 *
 * We assume the vma may be removed as part of the merge.
 */
bool
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
		struct anon_vma *anon_vma, struct file *file,
		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		struct anon_vma_name *anon_name)
{
	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		if (vma->vm_pgoff == vm_pgoff)
			return true;
	}
	return false;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if the same anon_vma is assigned but the offsets are
 * incompatible.
 *
 * We assume that the vma is not removed as part of the merge.
 */
bool
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
		struct anon_vma *anon_vma, struct file *file,
		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
		struct anon_vma_name *anon_name)
{
	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		pgoff_t vm_pglen;

		vm_pglen = vma_pages(vma);
		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
			return true;
	}
	return false;
}

/*
 * Close a vm structure and free it.
 */
void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed)
{
	might_sleep();
	if (!closed && vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	if (unreachable)
		__vm_area_free(vma);
	else
		vm_area_free(vma);
}

/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
		   /* mm_wr_locked = */ true);
	mas_set(mas, vma->vm_end);
	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
		      next ? next->vm_start : USER_PGTABLES_CEILING,
		      /* mm_wr_locked = */ true);
	tlb_finish_mmu(&tlb);
}
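/*
 * Illustrative sketch (not part of the original source): unmap_region() is
 * intended for paths that must tear down PTEs of a single VMA that is still
 * reachable, e.g. when undoing a partially set up mapping.  A hypothetical
 * caller, holding mmap_lock for writing, might do:
 *
 *	vma_iter_set(&vmi, vma->vm_start);
 *	unmap_region(&vmi.mas, vma, prev, next);
 *
 * where prev/next bound the page-table ranges that free_pgtables() may free.
 */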
/*
 * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
 * has already been checked or doesn't make sense to fail.
 * VMA Iterator will point to the original VMA.
 */
static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
		       unsigned long addr, int new_below)
{
	struct vma_prepare vp;
	struct vm_area_struct *new;
	int err;

	WARN_ON(vma->vm_start >= addr);
	WARN_ON(vma->vm_end <= addr);

	if (vma->vm_ops && vma->vm_ops->may_split) {
		err = vma->vm_ops->may_split(vma, addr);
		if (err)
			return err;
	}

	new = vm_area_dup(vma);
	if (!new)
		return -ENOMEM;

	if (new_below) {
		new->vm_end = addr;
	} else {
		new->vm_start = addr;
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	err = -ENOMEM;
	vma_iter_config(vmi, new->vm_start, new->vm_end);
	if (vma_iter_prealloc(vmi, new))
		goto out_free_vma;

	err = vma_dup_policy(vma, new);
	if (err)
		goto out_free_vmi;

	err = anon_vma_clone(new, vma);
	if (err)
		goto out_free_mpol;

	if (new->vm_file)
		get_file(new->vm_file);

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	vma_start_write(vma);
	vma_start_write(new);

	init_vma_prep(&vp, vma);
	vp.insert = new;
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);

	if (new_below) {
		vma->vm_start = addr;
		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
	} else {
		vma->vm_end = addr;
	}

	/* vma_complete stores the new vma */
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);

	/* Success. */
	if (new_below)
		vma_next(vmi);
	else
		vma_prev(vmi);

	return 0;

out_free_mpol:
	mpol_put(vma_policy(new));
out_free_vmi:
	vma_iter_free(vmi);
out_free_vma:
	vm_area_free(new);
	return err;
}

/*
 * Split a vma into two pieces at address 'addr'; a new vma is allocated for
 * either the first part or the tail.
 */
static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
		     unsigned long addr, int new_below)
{
	if (vma->vm_mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	return __split_vma(vmi, vma, addr, new_below);
}
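/*
 * Illustrative sketch (not part of the original source): to carve out a
 * subrange [start, end) of an existing VMA, callers such as vma_modify()
 * below issue up to two splits, one at each boundary that falls strictly
 * inside the VMA:
 *
 *	if (vma->vm_start < start)
 *		error = split_vma(vmi, vma, start, 1);	// new VMA below start
 *	if (!error && vma->vm_end > end)
 *		error = split_vma(vmi, vma, end, 0);	// new VMA above end
 *
 * After both calls, 'vma' spans exactly [start, end).
 */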
/*
 * init_vma_prep() - Initializer wrapper for vma_prepare struct
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 */
void init_vma_prep(struct vma_prepare *vp,
		   struct vm_area_struct *vma)
{
	init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
}

/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
				      struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_lock and by
 * the root anon_vma's mutex.
 */
void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}

static void __vma_link_file(struct vm_area_struct *vma,
			    struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_allow_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_insert(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * vma_prepare() - Helper function for handling locking VMAs prior to altering
 * @vp: The initialized vma_prepare struct
 */
void vma_prepare(struct vma_prepare *vp)
{
	if (vp->file) {
		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);

		if (vp->adj_next)
			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
				      vp->adj_next->vm_end);

		i_mmap_lock_write(vp->mapping);
		if (vp->insert && vp->insert->vm_file) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(vp->insert,
					vp->insert->vm_file->f_mapping);
		}
	}

	if (vp->anon_vma) {
		anon_vma_lock_write(vp->anon_vma);
		anon_vma_interval_tree_pre_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
	}

	if (vp->file) {
		flush_dcache_mmap_lock(vp->mapping);
		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
		if (vp->adj_next)
			vma_interval_tree_remove(vp->adj_next,
						 &vp->mapping->i_mmap);
	}
}
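/*
 * Illustrative sketch (not part of the original source): vma_prepare() and
 * vma_complete() bracket every VMA-range update in this file.  A typical
 * sequence, with details elided, is:
 *
 *	vma_start_write(vma);
 *	init_vma_prep(&vp, vma);
 *	vma_prepare(&vp);		// take rmap locks, drop interval trees
 *	... update vm_start/vm_end/vm_pgoff and the maple tree ...
 *	vma_complete(&vp, vmi, mm);	// reinsert, unlock, free removed VMAs
 *
 * Between the two calls, rmap walkers cannot observe the VMA in a
 * half-updated state.
 */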
/*
 * dup_anon_vma() - Helper function to duplicate anon_vma
 * @dst: The destination VMA
 * @src: The source VMA
 * @dup: Pointer to the destination VMA when successful.
 *
 * Returns: 0 on success.
 */
static int dup_anon_vma(struct vm_area_struct *dst,
			struct vm_area_struct *src, struct vm_area_struct **dup)
{
	/*
	 * Easily overlooked: when mprotect shifts the boundary, make sure the
	 * expanding vma has anon_vma set if the shrinking vma had, to cover
	 * any anon pages imported.
	 */
	if (src->anon_vma && !dst->anon_vma) {
		int ret;

		vma_assert_write_locked(dst);
		dst->anon_vma = src->anon_vma;
		ret = anon_vma_clone(dst, src);
		if (ret)
			return ret;

		*dup = dst;
	}

	return 0;
}

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mt_validate(&mm->mm_mt);
	for_each_vma(vmi, vma) {
#ifdef CONFIG_DEBUG_VM_RB
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;
#endif
		unsigned long vmi_start, vmi_end;
		bool warn = 0;

		vmi_start = vma_iter_addr(&vmi);
		vmi_end = vma_iter_end(&vmi);
		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
			warn = 1;

		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
			warn = 1;

		if (warn) {
			pr_emerg("issue in %s\n", current->comm);
			dump_stack();
			dump_vma(vma);
			pr_emerg("tree range: %px start %lx end %lx\n", vma,
				 vmi_start, vmi_end - 1);
			vma_iter_dump_tree(&vmi);
		}

#ifdef CONFIG_DEBUG_VM_RB
		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}
#endif
		i++;
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */

/*
 * vma_expand - Expand an existing VMA
 *
 * @vmi: The vma iterator
 * @vma: The vma to expand
 * @start: The start of the vma
 * @end: The exclusive end of the vma
 * @pgoff: The page offset of vma
 * @next: The next vma, if it is to be consumed by the expansion
 *
 * Expand @vma to @start and @end.  Can expand off the start and end.  Will
 * expand over @next if it's different from @vma and @end == @next->vm_end.
 * Checking if the @vma can expand and merge with @next needs to be handled by
 * the caller.
 *
 * Returns: 0 on success
 */
int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff,
	       struct vm_area_struct *next)
{
	struct vm_area_struct *anon_dup = NULL;
	bool remove_next = false;
	struct vma_prepare vp;

	vma_start_write(vma);
	if (next && (vma != next) && (end == next->vm_end)) {
		int ret;

		remove_next = true;
		vma_start_write(next);
		ret = dup_anon_vma(vma, next, &anon_dup);
		if (ret)
			return ret;
	}

	init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
	/* Not merging but overwriting any part of next is not handled. */
	VM_WARN_ON(next && !vp.remove &&
		   next != vma && end > next->vm_start);
	/* Only handles expanding */
	VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);

	/* Note: vma iterator must be pointing to 'start' */
	vma_iter_config(vmi, start, end);
	if (vma_iter_prealloc(vmi, vma))
		goto nomem;

	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, start, end, 0);
	vma_set_range(vma, start, end, pgoff);
	vma_iter_store(vmi, vma);

	vma_complete(&vp, vmi, vma->vm_mm);
	return 0;

nomem:
	if (anon_dup)
		unlink_anon_vmas(anon_dup);
	return -ENOMEM;
}
/*
 * vma_shrink() - Reduce an existing VMA's memory area
 * @vmi: The vma iterator
 * @vma: The VMA to modify
 * @start: The new start
 * @end: The new end
 * @pgoff: The new page offset of @vma
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff)
{
	struct vma_prepare vp;

	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));

	if (vma->vm_start < start)
		vma_iter_config(vmi, vma->vm_start, start);
	else
		vma_iter_config(vmi, end, vma->vm_end);

	if (vma_iter_prealloc(vmi, NULL))
		return -ENOMEM;

	vma_start_write(vma);

	init_vma_prep(&vp, vma);
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, start, end, 0);

	vma_iter_clear(vmi);
	vma_set_range(vma, start, end, pgoff);
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);
	return 0;
}
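/*
 * Illustrative sketch (not part of the original source): vma_shrink() keeps
 * exactly one of the two boundaries in place (the WARN_ON above fires only
 * if both move).  A hypothetical caller trimming the tail of a VMA might do:
 *
 *	// [vm_start, vm_end) becomes [vm_start, new_end), pgoff unchanged
 *	error = vma_shrink(vmi, vma, vma->vm_start, new_end, vma->vm_pgoff);
 *
 * Trimming the front instead would also advance pgoff by the number of
 * pages removed.
 */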
/*
 * vma_complete() - Helper function for handling the unlocking after altering
 * VMAs, or for inserting a VMA.
 *
 * @vp: The vma_prepare struct
 * @vmi: The vma iterator
 * @mm: The mm_struct
 */
void vma_complete(struct vma_prepare *vp,
		  struct vma_iterator *vmi, struct mm_struct *mm)
{
	if (vp->file) {
		if (vp->adj_next)
			vma_interval_tree_insert(vp->adj_next,
						 &vp->mapping->i_mmap);
		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
		flush_dcache_mmap_unlock(vp->mapping);
	}

	if (vp->remove && vp->file) {
		__remove_shared_vm_struct(vp->remove, vp->mapping);
		if (vp->remove2)
			__remove_shared_vm_struct(vp->remove2, vp->mapping);
	} else if (vp->insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		vma_iter_store(vmi, vp->insert);
		mm->map_count++;
	}

	if (vp->anon_vma) {
		anon_vma_interval_tree_post_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_post_update_vma(vp->adj_next);
		anon_vma_unlock_write(vp->anon_vma);
	}

	if (vp->file) {
		i_mmap_unlock_write(vp->mapping);
		uprobe_mmap(vp->vma);

		if (vp->adj_next)
			uprobe_mmap(vp->adj_next);
	}

	if (vp->remove) {
again:
		vma_mark_detached(vp->remove, true);
		if (vp->file) {
			uprobe_munmap(vp->remove, vp->remove->vm_start,
				      vp->remove->vm_end);
			fput(vp->file);
		}
		if (vp->remove->anon_vma)
			anon_vma_merge(vp->vma, vp->remove);
		mm->map_count--;
		mpol_put(vma_policy(vp->remove));
		if (!vp->remove2)
			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
		vm_area_free(vp->remove);

		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we are removing both mid and next vmas.
		 */
		if (vp->remove2) {
			vp->remove = vp->remove2;
			vp->remove2 = NULL;
			goto again;
		}
	}
	if (vp->insert && vp->file)
		uprobe_mmap(vp->insert);
}

static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
				  struct ma_state *mas_detach, bool mm_wr_locked)
{
	struct mmu_gather tlb;

	if (!vms->clear_ptes) /* Nothing to do */
		return;

	/*
	 * We can free page tables without write-locking mmap_lock because VMAs
	 * were isolated before we downgraded mmap_lock.
	 */
	mas_set(mas_detach, 1);
	lru_add_drain();
	tlb_gather_mmu(&tlb, vms->vma->vm_mm);
	update_hiwater_rss(vms->vma->vm_mm);
	unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
		   vms->vma_count, mm_wr_locked);

	mas_set(mas_detach, 1);
	/* start and end may be different if there is no prev or next vma. */
	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
		      vms->unmap_end, mm_wr_locked);
	tlb_finish_mmu(&tlb);
	vms->clear_ptes = false;
}

void vms_clean_up_area(struct vma_munmap_struct *vms,
		       struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	if (!vms->nr_pages)
		return;

	vms_clear_ptes(vms, mas_detach, true);
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		if (vma->vm_ops && vma->vm_ops->close)
			vma->vm_ops->close(vma);
	vms->closed_vm_ops = true;
}
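/*
 * Illustrative sketch (not part of the original source): munmap proceeds in
 * two phases, with the maple-tree update between them, as wired together in
 * do_vmi_align_munmap() below:
 *
 *	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
 *	error = vms_gather_munmap_vmas(&vms, &mas_detach);	 // split/detach
 *	...
 *	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL); // tree update
 *	...
 *	vms_complete_munmap_vmas(&vms, &mas_detach);	// unmap, account, free
 *
 * Gathering can still fail and be undone; once the tree is updated, the
 * completion phase is the point of no return.
 */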
/*
 * vms_complete_munmap_vmas() - Finish the munmap() operation
 * @vms: The vma munmap struct
 * @mas_detach: The maple state of the detached vmas
 *
 * This updates the mm_struct, unmaps the region, frees the resources used for
 * the munmap() and may downgrade the lock - if requested.  Everything needed
 * to be done once the vma maple tree is updated.
 */
void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
			      struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	mm = current->mm;
	mm->map_count -= vms->vma_count;
	mm->locked_vm -= vms->locked_vm;
	if (vms->unlock)
		mmap_write_downgrade(mm);

	if (!vms->nr_pages)
		return;

	vms_clear_ptes(vms, mas_detach, !vms->unlock);
	/* Update high watermark before we lower total_vm */
	update_hiwater_vm(mm);
	/* Stat accounting */
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
	/* Paranoid bookkeeping */
	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
	VM_WARN_ON(vms->data_vm > mm->data_vm);
	mm->exec_vm -= vms->exec_vm;
	mm->stack_vm -= vms->stack_vm;
	mm->data_vm -= vms->data_vm;

	/* Remove and clean up vmas */
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		remove_vma(vma, /* unreachable = */ false, vms->closed_vm_ops);

	vm_unacct_memory(vms->nr_accounted);
	validate_mm(mm);
	if (vms->unlock)
		mmap_read_unlock(mm);

	__mt_destroy(mas_detach->tree);
}

/*
 * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
 * for removal at a later date.  Handles splitting the first and last VMAs if
 * necessary and marking the vmas as isolated.
 *
 * @vms: The vma munmap struct
 * @mas_detach: The maple state tracking the detached tree
 *
 * Return: 0 on success, -EPERM on mseal vmas, -ENOMEM otherwise
 */
int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
			   struct ma_state *mas_detach)
{
	struct vm_area_struct *next = NULL;
	int error = -ENOMEM;

	/*
	 * If we need to split any vma, do it now to save pain later.
	 *
	 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
	 * unmapped vm_area_struct will remain in use: so lower split_vma
	 * places tmp vma above, and higher split_vma places tmp vma below.
	 */

	/* Does it split the first one? */
	if (vms->start > vms->vma->vm_start) {

		/*
		 * Make sure that map_count on return from munmap() will
		 * not exceed its limit; but let map_count go just above
		 * its limit temporarily, to help free resources as expected.
		 */
		if (vms->end < vms->vma->vm_end &&
		    vms->vma->vm_mm->map_count >= sysctl_max_map_count)
			goto map_count_exceeded;

		/* Don't bother splitting the VMA if we can't unmap it anyway */
		if (!can_modify_vma(vms->vma)) {
			error = -EPERM;
			goto start_split_failed;
		}

		if (__split_vma(vms->vmi, vms->vma, vms->start, 1))
			goto start_split_failed;
	}
	vms->prev = vma_prev(vms->vmi);
	if (vms->prev)
		vms->unmap_start = vms->prev->vm_end;

	/*
	 * Detach a range of VMAs from the mm.  Using next as a temp variable
	 * as it is always overwritten.
	 */
	for_each_vma_range(*(vms->vmi), next, vms->end) {
		long nrpages;

		if (!can_modify_vma(next)) {
			error = -EPERM;
			goto modify_vma_failed;
		}
		/* Does it split the end? */
		if (next->vm_end > vms->end) {
			if (__split_vma(vms->vmi, next, vms->end, 0))
				goto end_split_failed;
		}
		vma_start_write(next);
		mas_set(mas_detach, vms->vma_count++);
		if (mas_store_gfp(mas_detach, next, GFP_KERNEL))
			goto munmap_gather_failed;

		vma_mark_detached(next, true);
		nrpages = vma_pages(next);

		vms->nr_pages += nrpages;
		if (next->vm_flags & VM_LOCKED)
			vms->locked_vm += nrpages;

		if (next->vm_flags & VM_ACCOUNT)
			vms->nr_accounted += nrpages;

		if (is_exec_mapping(next->vm_flags))
			vms->exec_vm += nrpages;
		else if (is_stack_mapping(next->vm_flags))
			vms->stack_vm += nrpages;
		else if (is_data_mapping(next->vm_flags))
			vms->data_vm += nrpages;

		if (unlikely(vms->uf)) {
			/*
			 * If userfaultfd_unmap_prep returns an error the vmas
			 * will remain split, but userland will get a
			 * highly unexpected error anyway.  This is no
			 * different than the case where the first of the two
			 * __split_vma fails, but we don't undo the first
			 * split, although we could.  This failure is unlikely
			 * enough that it's not worth optimizing for.
			 */
			if (userfaultfd_unmap_prep(next, vms->start, vms->end,
						   vms->uf))
				goto userfaultfd_error;
		}
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
		BUG_ON(next->vm_start < vms->start);
		BUG_ON(next->vm_start > vms->end);
#endif
	}

	vms->next = vma_next(vms->vmi);
	if (vms->next)
		vms->unmap_end = vms->next->vm_start;

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	/* Make sure no VMAs are about to be lost. */
	{
		MA_STATE(test, mas_detach->tree, 0, 0);
		struct vm_area_struct *vma_mas, *vma_test;
		int test_count = 0;

		vma_iter_set(vms->vmi, vms->start);
		rcu_read_lock();
		vma_test = mas_find(&test, vms->vma_count - 1);
		for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
			BUG_ON(vma_mas != vma_test);
			test_count++;
			vma_test = mas_next(&test, vms->vma_count - 1);
		}
		rcu_read_unlock();
		BUG_ON(vms->vma_count != test_count);
	}
#endif

	while (vma_iter_addr(vms->vmi) > vms->start)
		vma_iter_prev_range(vms->vmi);

	vms->clear_ptes = true;
	return 0;

userfaultfd_error:
munmap_gather_failed:
end_split_failed:
modify_vma_failed:
	reattach_vmas(mas_detach);
start_split_failed:
map_count_exceeded:
	return error;
}
/*
 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
 * @vmi: The vma iterator
 * @vma: The starting vm_area_struct
 * @mm: The mm_struct
 * @start: The aligned start address to munmap.
 * @end: The aligned end address to munmap.
 * @uf: The userfaultfd list_head
 * @unlock: Set to true to drop the mmap_lock.  Unlocking only happens on
 * success.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
			struct mm_struct *mm, unsigned long start,
			unsigned long end, struct list_head *uf, bool unlock)
{
	struct maple_tree mt_detach;
	MA_STATE(mas_detach, &mt_detach, 0, 0);
	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
	mt_on_stack(mt_detach);
	struct vma_munmap_struct vms;
	int error;

	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
	error = vms_gather_munmap_vmas(&vms, &mas_detach);
	if (error)
		goto gather_failed;

	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
	if (error)
		goto clear_tree_failed;

	/* Point of no return */
	vms_complete_munmap_vmas(&vms, &mas_detach);
	return 0;

clear_tree_failed:
	reattach_vmas(&mas_detach);
gather_failed:
	validate_mm(mm);
	return error;
}

/*
 * do_vmi_munmap() - munmap a given range.
 * @vmi: The vma iterator
 * @mm: The mm_struct
 * @start: The start address to munmap
 * @len: The length of the range to munmap
 * @uf: The userfaultfd list_head
 * @unlock: set to true if the user wants to drop the mmap_lock on success
 *
 * This function takes a @vmi that is either pointing to the previous VMA or
 * set to MA_START and sets it up to remove the mapping(s).  The @len will be
 * aligned.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock)
{
	unsigned long end;
	struct vm_area_struct *vma;

	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE - start)
		return -EINVAL;

	end = start + PAGE_ALIGN(len);
	if (end == start)
		return -EINVAL;

	/* Find the first overlapping VMA */
	vma = vma_find(vmi, end);
	if (!vma) {
		if (unlock)
			mmap_write_unlock(mm);
		return 0;
	}

	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
}
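/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller unmapping a range, in the style of the munmap() syscall path,
 * holds the mmap_lock for writing and positions an iterator at @start:
 *
 *	VMA_ITERATOR(vmi, mm, start);
 *
 *	if (mmap_write_lock_killable(mm))
 *		return -EINTR;
 *	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, true);
 *	// on success the lock has already been dropped (unlock == true)
 */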
/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
 * figure out whether that can be merged with its predecessor or its
 * successor.  Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where **** is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
 * at the same address as **** and is of the same or larger span, and
 * NNNN the next vma after ****:
 *
 *         ****             ****                   ****
 *     PPPPPPNNNNNN    PPPPPPNNNNNN       PPPPPPCCCCCC
 *    cannot merge    might become       might become
 *                    PPNNNNNNNNNN       PPPPPPPPPPCC
 *    mmap, brk or    case 4 below       case 5 below
 *    mremap move:
 *                        ****               ****
 *                    PPPP    NNNN       PPPPCCCCNNNN
 *                    might become       might become
 *                    PPPPPPPPPPPP 1 or  PPPPPPPPPPPP 6 or
 *                    PPPPPPPPNNNN 2 or  PPPPPPPPNNNN 7 or
 *                    PPPPNNNNNNNN 3     PPPPNNNNNNNN 8
 *
 * It is important for case 8 that the vma CCCC overlapping the
 * region **** is never going to be extended over NNNN.  Instead NNNN must
 * be extended in region **** and CCCC must be removed.  This way in
 * all cases where vma_merge succeeds, the moment vma_merge drops the
 * rmap_locks, the properties of the merged vma will be already
 * correct for the whole merged range.  Some of those properties like
 * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
 * be correct for the whole merged range immediately after the
 * rmap_locks are released.  Otherwise if NNNN would be removed and
 * CCCC would be extended over the NNNN range, remove_migration_ptes
 * or other rmap walkers (if working on addresses beyond the "end"
 * parameter) may establish ptes with the wrong permissions of CCCC
 * instead of the right permissions of NNNN.
 *
 * In the code below:
 * PPPP is represented by *prev
 * CCCC is represented by *curr or not represented at all (NULL)
 * NNNN is represented by *next or not represented at all (NULL)
 * **** is not represented - it will be merged and the vma containing the
 *      area is returned, or the function will return NULL
 */
static struct vm_area_struct
*vma_merge(struct vma_iterator *vmi, struct vm_area_struct *prev,
	   struct vm_area_struct *src, unsigned long addr, unsigned long end,
	   unsigned long vm_flags, pgoff_t pgoff, struct mempolicy *policy,
	   struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
	   struct anon_vma_name *anon_name)
{
	struct mm_struct *mm = src->vm_mm;
	struct anon_vma *anon_vma = src->anon_vma;
	struct file *file = src->vm_file;
	struct vm_area_struct *curr, *next, *res;
	struct vm_area_struct *vma, *adjust, *remove, *remove2;
	struct vm_area_struct *anon_dup = NULL;
	struct vma_prepare vp;
	pgoff_t vma_pgoff;
	int err = 0;
	bool merge_prev = false;
	bool merge_next = false;
	bool vma_expanded = false;
	unsigned long vma_start = addr;
	unsigned long vma_end = end;
	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
	long adj_start = 0;

	/*
	 * We later require that vma->vm_flags == vm_flags,
	 * so this tests vma->vm_flags & VM_SPECIAL, too.
	 */
	if (vm_flags & VM_SPECIAL)
		return NULL;

	/* Does the input range span an existing VMA? (cases 5 - 8) */
	curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);

	if (!curr ||			/* cases 1 - 4 */
	    end == curr->vm_end)	/* cases 6 - 8, adjacent VMA */
		next = vma_lookup(mm, end);
	else
		next = NULL;		/* case 5 */

	if (prev) {
		vma_start = prev->vm_start;
		vma_pgoff = prev->vm_pgoff;

		/* Can we merge the predecessor? */
		if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
		    && can_vma_merge_after(prev, vm_flags, anon_vma, file,
					   pgoff, vm_userfaultfd_ctx, anon_name)) {
			merge_prev = true;
			vma_prev(vmi);
		}
	}

	/* Can we merge the successor? */
	if (next && mpol_equal(policy, vma_policy(next)) &&
	    can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
				 vm_userfaultfd_ctx, anon_name)) {
		merge_next = true;
	}

	/* Verify some invariants that must be enforced by the caller. */
	VM_WARN_ON(prev && addr <= prev->vm_start);
	VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
	VM_WARN_ON(addr >= end);

	if (!merge_prev && !merge_next)
		return NULL;		/* Not mergeable. */

	if (merge_prev)
		vma_start_write(prev);

	res = vma = prev;
	remove = remove2 = adjust = NULL;

	/* Can we merge both the predecessor and the successor? */
	if (merge_prev && merge_next &&
	    is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
		vma_start_write(next);
		remove = next;				/* case 1 */
		vma_end = next->vm_end;
		err = dup_anon_vma(prev, next, &anon_dup);
		if (curr) {				/* case 6 */
			vma_start_write(curr);
			remove = curr;
			remove2 = next;
			/*
			 * Note that the dup_anon_vma below cannot overwrite err
			 * since the first caller would do nothing unless next
			 * has an anon_vma.
			 */
			if (!next->anon_vma)
				err = dup_anon_vma(prev, curr, &anon_dup);
		}
	} else if (merge_prev) {			/* case 2 */
		if (curr) {
			vma_start_write(curr);
			if (end == curr->vm_end) {	/* case 7 */
				/*
				 * can_vma_merge_after() assumed we would not be
				 * removing prev vma, so it skipped the check
				 * for vm_ops->close, but we are removing curr.
				 */
				if (curr->vm_ops && curr->vm_ops->close)
					err = -EINVAL;
				remove = curr;
			} else {			/* case 5 */
				adjust = curr;
				adj_start = (end - curr->vm_start);
			}
			if (!err)
				err = dup_anon_vma(prev, curr, &anon_dup);
		}
	} else {					/* merge_next */
		vma_start_write(next);
		res = next;
		if (prev && addr < prev->vm_end) {	/* case 4 */
			vma_start_write(prev);
			vma_end = addr;
			adjust = next;
			adj_start = -(prev->vm_end - addr);
			err = dup_anon_vma(next, prev, &anon_dup);
		} else {
			/*
			 * Note that cases 3 and 8 are the ONLY ones where prev
			 * is permitted to be (but is not necessarily) NULL.
			 */
			vma = next;			/* case 3 */
			vma_start = addr;
			vma_end = next->vm_end;
			vma_pgoff = next->vm_pgoff - pglen;
			if (curr) {			/* case 8 */
				vma_pgoff = curr->vm_pgoff;
				vma_start_write(curr);
				remove = curr;
				err = dup_anon_vma(next, curr, &anon_dup);
			}
		}
	}

	/* Error in anon_vma clone. */
	if (err)
		goto anon_vma_fail;

	if (vma_start < vma->vm_start || vma_end > vma->vm_end)
		vma_expanded = true;

	if (vma_expanded) {
		vma_iter_config(vmi, vma_start, vma_end);
	} else {
		vma_iter_config(vmi, adjust->vm_start + adj_start,
				adjust->vm_end);
	}

	if (vma_iter_prealloc(vmi, vma))
		goto prealloc_fail;

	init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
		   vp.anon_vma != adjust->anon_vma);

	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
	vma_set_range(vma, vma_start, vma_end, vma_pgoff);

	if (vma_expanded)
		vma_iter_store(vmi, vma);

	if (adj_start) {
		adjust->vm_start += adj_start;
		adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
		if (adj_start < 0) {
			WARN_ON(vma_expanded);
			vma_iter_store(vmi, next);
		}
	}

	vma_complete(&vp, vmi, mm);
	validate_mm(mm);
	khugepaged_enter_vma(res, vm_flags);
	return res;

prealloc_fail:
	if (anon_dup)
		unlink_anon_vmas(anon_dup);

anon_vma_fail:
	vma_iter_set(vmi, addr);
	vma_iter_load(vmi);
	return NULL;
}

/*
 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
 * context and anonymous VMA name within the range [start, end).
 *
 * As a result, we might be able to merge the newly modified VMA range with an
 * adjacent VMA with identical properties.
 *
 * If no merge is possible and the range does not span the entirety of the VMA,
 * we then need to split the VMA to accommodate the change.
 *
 * The function returns either the merged VMA, the original VMA if a split was
 * required instead, or an error if the split failed.
 */
struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
				  struct vm_area_struct *prev,
				  struct vm_area_struct *vma,
				  unsigned long start, unsigned long end,
				  unsigned long vm_flags,
				  struct mempolicy *policy,
				  struct vm_userfaultfd_ctx uffd_ctx,
				  struct anon_vma_name *anon_name)
{
	pgoff_t pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	struct vm_area_struct *merged;

	merged = vma_merge(vmi, prev, vma, start, end, vm_flags,
			   pgoff, policy, uffd_ctx, anon_name);
	if (merged)
		return merged;

	if (vma->vm_start < start) {
		int err = split_vma(vmi, vma, start, 1);

		if (err)
			return ERR_PTR(err);
	}

	if (vma->vm_end > end) {
		int err = split_vma(vmi, vma, end, 0);

		if (err)
			return ERR_PTR(err);
	}

	return vma;
}

/*
 * Attempt to merge a newly mapped VMA with those adjacent to it.  The caller
 * must ensure that [start, end) does not overlap any existing VMA.
 */
struct vm_area_struct
*vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev,
		   struct vm_area_struct *vma, unsigned long start,
		   unsigned long end, pgoff_t pgoff)
{
	return vma_merge(vmi, prev, vma, start, end, vma->vm_flags, pgoff,
			 vma_policy(vma), vma->vm_userfaultfd_ctx, anon_vma_name(vma));
}
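/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * mprotect-style caller changing flags over [start, end) would use
 * vma_modify() above roughly like this:
 *
 *	vma = vma_modify(vmi, prev, vma, start, end, new_flags,
 *			 vma_policy(vma), vma->vm_userfaultfd_ctx,
 *			 anon_vma_name(vma));
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	// vma now spans exactly [start, end), merged or split as needed
 */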
/*
 * Expand the vma by delta bytes, potentially merging with an immediately
 * adjacent VMA with identical properties.
 */
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta)
{
	pgoff_t pgoff = vma->vm_pgoff + vma_pages(vma);

	/* vma is specified as prev, so case 1 or 2 will apply. */
	return vma_merge(vmi, vma, vma, vma->vm_end, vma->vm_end + delta,
			 vma->vm_flags, pgoff, vma_policy(vma),
			 vma->vm_userfaultfd_ctx, anon_vma_name(vma));
}

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
{
	vb->count = 0;
}

static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
{
	struct address_space *mapping;
	int i;

	mapping = vb->vmas[0]->vm_file->f_mapping;
	i_mmap_lock_write(mapping);
	for (i = 0; i < vb->count; i++) {
		VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
		__remove_shared_vm_struct(vb->vmas[i], mapping);
	}
	i_mmap_unlock_write(mapping);

	unlink_file_vma_batch_init(vb);
}

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma)
{
	if (vma->vm_file == NULL)
		return;

	if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
	    vb->count == ARRAY_SIZE(vb->vmas))
		unlink_file_vma_batch_process(vb);

	vb->vmas[vb->count] = vma;
	vb->count++;
}

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
{
	if (vb->count > 0)
		unlink_file_vma_batch_process(vb);
}

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;

		i_mmap_lock_write(mapping);
		__remove_shared_vm_struct(vma, mapping);
		i_mmap_unlock_write(mapping);
	}
}

void vma_link_file(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping;

	if (file) {
		mapping = file->f_mapping;
		i_mmap_lock_write(mapping);
		__vma_link_file(vma, mapping);
		i_mmap_unlock_write(mapping);
	}
}

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
{
	VMA_ITERATOR(vmi, mm, 0);

	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;

	vma_start_write(vma);
	vma_iter_store(&vmi, vma);
	vma_link_file(vma);
	mm->map_count++;
	validate_mm(mm);
	return 0;
}
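/*
 * Illustrative sketch (not part of the original source): the batch API above
 * amortizes i_mmap_rwsem acquisition when unlinking many file-backed VMAs,
 * taking the lock once per run of VMAs sharing the same file.  A hypothetical
 * caller walking a set of VMAs would do:
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for_each_vma(vmi, vma)
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);	// flush any remaining entries
 */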
/*
 * Copy the vma structure to a new location in the same mm,
 * prior to moving page table entries, to effect an mremap move.
 */
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks)
{
	struct vm_area_struct *vma = *vmap;
	unsigned long vma_start = vma->vm_start;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma, *prev;
	bool faulted_in_anon_vma = true;
	VMA_ITERATOR(vmi, mm, addr);

	/*
	 * If the anonymous vma has not yet been faulted, update the new pgoff
	 * to match the new location, to increase its chance of merging.
	 */
	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
		pgoff = addr >> PAGE_SHIFT;
		faulted_in_anon_vma = false;
	}

	new_vma = find_vma_prev(mm, addr, &prev);
	if (new_vma && new_vma->vm_start < addr + len)
		return NULL;	/* should never get here */

	new_vma = vma_merge_new_vma(&vmi, prev, vma, addr, addr + len, pgoff);
	if (new_vma) {
		/*
		 * Source vma may have been merged into new_vma
		 */
		if (unlikely(vma_start >= new_vma->vm_start &&
			     vma_start < new_vma->vm_end)) {
			/*
			 * The only way we can get a vma_merge with
			 * self during an mremap is if the vma hasn't
			 * been faulted in yet and we were allowed to
			 * reset the dst vma->vm_pgoff to the
			 * destination address of the mremap to allow
			 * the merge to happen.  mremap must change the
			 * vm_pgoff linearity between src and dst vmas
			 * (in turn preventing a vma_merge) to be
			 * safe.  It is only safe to keep the vm_pgoff
			 * linear if there are no pages mapped yet.
			 */
			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
			*vmap = vma = new_vma;
		}
		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
	} else {
		new_vma = vm_area_dup(vma);
		if (!new_vma)
			goto out;
		vma_set_range(new_vma, addr, addr + len, pgoff);
		if (vma_dup_policy(vma, new_vma))
			goto out_free_vma;
		if (anon_vma_clone(new_vma, vma))
			goto out_free_mempol;
		if (new_vma->vm_file)
			get_file(new_vma->vm_file);
		if (new_vma->vm_ops && new_vma->vm_ops->open)
			new_vma->vm_ops->open(new_vma);
		if (vma_link(mm, new_vma))
			goto out_vma_link;
		*need_rmap_locks = false;
	}
	return new_vma;

out_vma_link:
	if (new_vma->vm_ops && new_vma->vm_ops->close)
		new_vma->vm_ops->close(new_vma);

	if (new_vma->vm_file)
		fput(new_vma->vm_file);

	unlink_anon_vmas(new_vma);
out_free_mempol:
	mpol_put(vma_policy(new_vma));
out_free_vma:
	vm_area_free(new_vma);
out:
	return NULL;
}

/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vma's.  For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting.  But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
	return a->vm_end == b->vm_start &&
	       mpol_equal(vma_policy(a), vma_policy(b)) &&
	       a->vm_file == b->vm_file &&
	       !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
	       b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}
/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'.  The 'a'/'b' vma's are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mmap_lock held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_.  But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically.  But
 * we do that READ_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vma's are compatible (adjacent,
 * and with the same memory policies).  That's all stable, even with just
 * a read lock on the mmap_lock.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
					  struct vm_area_struct *a,
					  struct vm_area_struct *b)
{
	if (anon_vma_compatible(a, b)) {
		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);

		if (anon_vma && list_is_singular(&old->anon_vma_chain))
			return anon_vma;
	}
	return NULL;
}

/*
 * find_mergeable_anon_vma() is used by anon_vma_prepare to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = NULL;
	struct vm_area_struct *prev, *next;
	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);

	/* Try next first. */
	next = vma_iter_load(&vmi);
	if (next) {
		anon_vma = reusable_anon_vma(next, vma, next);
		if (anon_vma)
			return anon_vma;
	}

	prev = vma_prev(&vmi);
	VM_BUG_ON_VMA(prev != vma, vma);
	prev = vma_prev(&vmi);
	/* Then try prev. */
	if (prev)
		anon_vma = reusable_anon_vma(prev, prev, vma);

	/*
	 * We might reach here with anon_vma == NULL if we can't find
	 * any reusable anon_vma.
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return anon_vma;
}

static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
{
	return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
}

static bool vma_is_shared_writable(struct vm_area_struct *vma)
{
	return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
	       (VM_WRITE | VM_SHARED);
}

static bool vma_fs_can_writeback(struct vm_area_struct *vma)
{
	/* No managed pages to writeback. */
	if (vma->vm_flags & VM_PFNMAP)
		return false;

	return vma->vm_file && vma->vm_file->f_mapping &&
	       mapping_can_writeback(vma->vm_file->f_mapping);
}
/*
 * Does this VMA require the underlying folios to have their dirty state
 * tracked?
 */
bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
{
	/* Only shared, writable VMAs require dirty tracking. */
	if (!vma_is_shared_writable(vma))
		return false;

	/* Does the filesystem need to be notified? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/*
	 * Even if the filesystem doesn't indicate a need for writenotify, if
	 * it can writeback, dirty tracking is still required.
	 */
	return vma_fs_can_writeback(vma);
}

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events.  If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
	/* If it was private or non-writable, the write bit is already clear */
	if (!vma_is_shared_writable(vma))
		return false;

	/* The backer wishes to know when pages are first written to? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/* The open routine did something to the protections that pgprot_modify
	 * won't preserve? */
	if (pgprot_val(vm_page_prot) !=
	    pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
		return false;

	/*
	 * Do we need to track softdirty? hugetlb does not support softdirty
	 * tracking yet.
	 */
	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
		return true;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_wp(vma))
		return true;

	/* Can the mapping track the dirty pages? */
	return vma_fs_can_writeback(vma);
}

static DEFINE_MUTEX(mm_all_locks_mutex);

static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
		/*
		 * We can safely modify head.next after taking the
		 * anon_vma->root->rwsem.  If some other vma in this mm shares
		 * the same anon_vma we won't take it again.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us thanks to the
		 * anon_vma->root->rwsem.
		 */
		if (__test_and_set_bit(0, (unsigned long *)
				       &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
	}
}

static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change from under us because
		 * we hold the mm_all_locks_mutex.
		 *
		 * Operations on ->flags have to be atomic because
		 * even if AS_MM_ALL_LOCKS is stable thanks to the
		 * mm_all_locks_mutex, there may be other cpus
		 * changing other bitflags in parallel to us.
		 */
		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
			BUG();
		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
	}
}
/*
 * This operation locks against the VM for all pte/vma/mm related
 * operations that could ever happen on a certain mm.  This includes
 * vmtruncate, try_to_unmap, and all page faults.
 *
 * The caller must take the mmap_lock in write mode before calling
 * mm_take_all_locks().  The caller isn't allowed to release the
 * mmap_lock until mm_drop_all_locks() returns.
 *
 * mmap_lock in write mode is required in order to block all operations
 * that could modify pagetables and free pages without need of
 * altering the vma layout.  It's also needed in write mode to prevent
 * new anon_vmas from being associated with existing vmas.
 *
 * A single task can't take more than one mm_take_all_locks() in a row
 * or it would deadlock.
 *
 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
 * mapping->flags prevent taking the same lock twice, if more than one
 * vma in this mm is backed by the same anon_vma or address_space.
 *
 * We take locks in the following order, according to the comment at the
 * beginning of mm/rmap.c:
 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
 *   hugetlb mappings);
 * - all vmas marked locked
 * - all i_mmap_rwsem locks;
 * - all anon_vma->rwsem locks
 *
 * We can take all locks within these types randomly because the VM code
 * doesn't nest them and we're protected from parallel mm_take_all_locks() by
 * mm_all_locks_mutex.
 *
 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
 * that may have to take thousands of locks.
 *
 * mm_take_all_locks() can fail if it's interrupted by signals.
 */
int mm_take_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_assert_write_locked(mm);

	mutex_lock(&mm_all_locks_mutex);

	/*
	 * vma_start_write() does not have a complement in mm_drop_all_locks()
	 * because vma_start_write() is always asymmetrical; it marks a VMA as
	 * being written to until mmap_write_unlock() or mmap_write_downgrade()
	 * is reached.
	 */
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		vma_start_write(vma);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
		    is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
		    !is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_lock_anon_vma(mm, avc->anon_vma);
	}

	return 0;

out_unlock:
	mm_drop_all_locks(mm);
	return -EINTR;
}
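/*
 * Illustrative sketch (not part of the original source): the expected
 * locking bracket around an operation that must exclude all VM activity
 * on an mm (e.g. mmu notifier registration) is:
 *
 *	mmap_write_lock(mm);
 *	ret = mm_take_all_locks(mm);	// may fail with -EINTR on a signal
 *	if (!ret) {
 *		... operate with all rmap locks held ...
 *		mm_drop_all_locks(mm);
 *	}
 *	mmap_write_unlock(mm);
 */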
static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change to 0 from under
		 * us because we hold the mm_all_locks_mutex.
		 *
		 * We must however clear the bitflag before unlocking
		 * the vma so the users using the anon_vma->rb_root will
		 * never see our bitflag.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us until we release the
		 * anon_vma->root->rwsem.
		 */
		if (!__test_and_clear_bit(0, (unsigned long *)
					  &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
		anon_vma_unlock_write(anon_vma);
	}
}

static void vm_unlock_mapping(struct address_space *mapping)
{
	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change to 0 from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		i_mmap_unlock_write(mapping);
		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
					&mapping->flags))
			BUG();
	}
}

/*
 * The mmap_lock cannot be released by the caller until
 * mm_drop_all_locks() returns.
 */
void mm_drop_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_assert_write_locked(mm);
	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));

	for_each_vma(vmi, vma) {
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_unlock_anon_vma(avc->anon_vma);
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_unlock_mapping(vma->vm_file->f_mapping);
	}

	mutex_unlock(&mm_all_locks_mutex);
}