// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * VMA-specific functions.
 */

#include "vma_internal.h"
#include "vma.h"

static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
{
	struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;
	/*
	 * If the vma has a ->close operation then the driver probably needs to
	 * release per-vma resources, so we don't attempt to merge those if the
	 * caller indicates the current vma may be removed as part of the merge,
	 * which is the case if we are attempting to merge the next VMA into
	 * this one.
	 */
	bool may_remove_vma = merge_next;

	if (!mpol_equal(vmg->policy, vma_policy(vma)))
		return false;
	/*
	 * VM_SOFTDIRTY should not prevent VMA merging: if the flags match
	 * except for the dirty bit, the caller should mark the merged VMA
	 * as dirty.  If the dirty bit were not excluded from the comparison,
	 * we would increase pressure on the memory system by forcing the
	 * kernel to generate new VMAs where old ones could have been
	 * extended instead.
	 */
	if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY)
		return false;
	if (vma->vm_file != vmg->file)
		return false;
	if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
		return false;
	if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
		return false;
	if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
		return false;
	return true;
}

static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
		struct anon_vma *anon_vma2, struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMAs cloned from
	 * parents.  This can improve scalability by reducing contention on
	 * the anon_vma lock.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
	    list_is_singular(&vma->anon_vma_chain)))
		return true;
	return anon_vma1 == anon_vma2;
}

/*
 * init_multi_vma_prep() - Initializer for struct vma_prepare
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 * @next: The next vma if it is to be adjusted
 * @remove: The first vma to be removed
 * @remove2: The second vma to be removed
 */
static void init_multi_vma_prep(struct vma_prepare *vp,
				struct vm_area_struct *vma,
				struct vm_area_struct *next,
				struct vm_area_struct *remove,
				struct vm_area_struct *remove2)
{
	memset(vp, 0, sizeof(struct vma_prepare));
	vp->vma = vma;
	vp->anon_vma = vma->anon_vma;
	vp->remove = remove;
	vp->remove2 = remove2;
	vp->adj_next = next;
	if (!vp->anon_vma && next)
		vp->anon_vma = next->anon_vma;

	vp->file = vma->vm_file;
	if (vp->file)
		vp->mapping = vma->vm_file->f_mapping;
}
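/*
 * Illustrative sketch (not part of this file): the VM_SOFTDIRTY exclusion
 * in is_mergeable_vma() above means two VMAs differing only in soft-dirty
 * state still compare as mergeable.  A hypothetical check might look like:
 *
 *	vm_flags_t a = VM_READ | VM_WRITE | VM_SOFTDIRTY;
 *	vm_flags_t b = VM_READ | VM_WRITE;
 *
 *	// (a ^ b) == VM_SOFTDIRTY, which is masked out, so the flags
 *	// are treated as equal for merging purposes.
 *	bool mergeable_flags = !((a ^ b) & ~VM_SOFTDIRTY);
 */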
/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if the same anon_vma is assigned but the offsets are
 * incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap() does not permit mmaps which
 * wrap, nor mmaps which cover the final page at index -1UL.
 *
 * We assume the vma may be removed as part of the merge.
 */
bool
can_vma_merge_before(struct vma_merge_struct *vmg)
{
	pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);

	if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
	    is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) {
		if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
			return true;
	}

	return false;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if the same anon_vma is assigned but the offsets are
 * incompatible.
 *
 * We assume that vma is not removed as part of the merge.
 */
bool can_vma_merge_after(struct vma_merge_struct *vmg)
{
	if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
	    is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) {
		if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
			return true;
	}
	return false;
}

/*
 * Close a vm structure and free it.
 */
void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed)
{
	might_sleep();
	if (!closed && vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	if (unreachable)
		__vm_area_free(vma);
	else
		vm_area_free(vma);
}

/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
		   /* mm_wr_locked = */ true);
	mas_set(mas, vma->vm_end);
	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
		      next ? next->vm_start : USER_PGTABLES_CEILING,
		      /* mm_wr_locked = */ true);
	tlb_finish_mmu(&tlb);
}
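/*
 * Illustrative sketch (not part of this file) of the file-offset adjacency
 * the can_vma_merge_before()/can_vma_merge_after() predicates above require.
 * Suppose prev maps [0x1000, 0x3000) with vm_pgoff == 10 and the new range
 * is [0x3000, 0x5000):
 *
 *	// can_vma_merge_after(): prev->vm_pgoff + vma_pages(prev)
 *	//   == 10 + 2 == 12, so the new range must have pgoff 12.
 *	// can_vma_merge_before(): for a next VMA at [0x5000, ...),
 *	//   next->vm_pgoff must equal pgoff + PHYS_PFN(0x5000 - 0x3000),
 *	//   i.e. 12 + 2 == 14, for the ranges to be contiguous on file.
 *
 * Virtual adjacency alone is not enough; the backing offsets must line up.
 */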
/*
 * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
 * has already been checked or doesn't make sense to fail.
 * VMA Iterator will point to the original VMA.
 */
static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
		       unsigned long addr, int new_below)
{
	struct vma_prepare vp;
	struct vm_area_struct *new;
	int err;

	WARN_ON(vma->vm_start >= addr);
	WARN_ON(vma->vm_end <= addr);

	if (vma->vm_ops && vma->vm_ops->may_split) {
		err = vma->vm_ops->may_split(vma, addr);
		if (err)
			return err;
	}

	new = vm_area_dup(vma);
	if (!new)
		return -ENOMEM;

	if (new_below) {
		new->vm_end = addr;
	} else {
		new->vm_start = addr;
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	err = -ENOMEM;
	vma_iter_config(vmi, new->vm_start, new->vm_end);
	if (vma_iter_prealloc(vmi, new))
		goto out_free_vma;

	err = vma_dup_policy(vma, new);
	if (err)
		goto out_free_vmi;

	err = anon_vma_clone(new, vma);
	if (err)
		goto out_free_mpol;

	if (new->vm_file)
		get_file(new->vm_file);

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	vma_start_write(vma);
	vma_start_write(new);

	init_vma_prep(&vp, vma);
	vp.insert = new;
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);

	if (new_below) {
		vma->vm_start = addr;
		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
	} else {
		vma->vm_end = addr;
	}

	/* vma_complete stores the new vma */
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);

	/* Success. */
	if (new_below)
		vma_next(vmi);
	else
		vma_prev(vmi);

	return 0;

out_free_mpol:
	mpol_put(vma_policy(new));
out_free_vmi:
	vma_iter_free(vmi);
out_free_vma:
	vm_area_free(new);
	return err;
}

/*
 * Split a vma into two pieces at address 'addr', a new vma is allocated
 * either for the first part or the tail.
 */
static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
		     unsigned long addr, int new_below)
{
	if (vma->vm_mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	return __split_vma(vmi, vma, addr, new_below);
}

/*
 * init_vma_prep() - Initializer wrapper for vma_prepare struct
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 */
void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
{
	init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
}

/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
				      struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}
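/*
 * Illustrative sketch (not part of this file) of __split_vma()'s new_below
 * semantics.  For a VMA spanning [0x1000, 0x5000) split at 0x3000:
 *
 *	// new_below == 1: the new VMA takes the lower part.
 *	//   new: [0x1000, 0x3000)   vma: [0x3000, 0x5000)
 *	// new_below == 0: the new VMA takes the upper part, and its
 *	//   vm_pgoff is advanced by (0x3000 - 0x1000) >> PAGE_SHIFT.
 *	//   vma: [0x1000, 0x3000)   new: [0x3000, 0x5000)
 *	err = __split_vma(vmi, vma, 0x3000, 1);
 *
 * Either way the iterator is left pointing at the original VMA.
 */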
/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_lock and by
 * the root anon_vma's mutex.
 */
void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}

static void __vma_link_file(struct vm_area_struct *vma,
			    struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_allow_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_insert(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * vma_prepare() - Helper function for handling locking VMAs prior to altering
 * @vp: The initialized vma_prepare struct
 */
void vma_prepare(struct vma_prepare *vp)
{
	if (vp->file) {
		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);

		if (vp->adj_next)
			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
				      vp->adj_next->vm_end);

		i_mmap_lock_write(vp->mapping);
		if (vp->insert && vp->insert->vm_file) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(vp->insert,
					vp->insert->vm_file->f_mapping);
		}
	}

	if (vp->anon_vma) {
		anon_vma_lock_write(vp->anon_vma);
		anon_vma_interval_tree_pre_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
	}

	if (vp->file) {
		flush_dcache_mmap_lock(vp->mapping);
		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
		if (vp->adj_next)
			vma_interval_tree_remove(vp->adj_next,
						 &vp->mapping->i_mmap);
	}
}
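/*
 * Illustrative sketch (not part of this file): vma_prepare() and
 * vma_complete() always bracket a range update, with the maple tree
 * preallocation done first so the locked section cannot fail:
 *
 *	struct vma_prepare vp;
 *
 *	vma_iter_config(vmi, new_start, new_end);	// hypothetical bounds
 *	if (vma_iter_prealloc(vmi, vma))
 *		return -ENOMEM;			// fail before taking locks
 *	init_vma_prep(&vp, vma);
 *	vma_prepare(&vp);			// take rmap/i_mmap locks, unhook
 *	vma_set_range(vma, new_start, new_end, new_pgoff);
 *	vma_iter_store(vmi, vma);
 *	vma_complete(&vp, vmi, vma->vm_mm);	// rehook, drop locks
 *
 * vma_expand() and vma_shrink() below are concrete instances of this pattern.
 */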
/*
 * dup_anon_vma() - Helper function to duplicate anon_vma
 * @dst: The destination VMA
 * @src: The source VMA
 * @dup: Pointer to the destination VMA when successful.
 *
 * Returns: 0 on success.
 */
static int dup_anon_vma(struct vm_area_struct *dst,
			struct vm_area_struct *src, struct vm_area_struct **dup)
{
	/*
	 * Easily overlooked: when mprotect shifts the boundary, make sure the
	 * expanding vma has anon_vma set if the shrinking vma had, to cover
	 * any anon pages imported.
	 */
	if (src->anon_vma && !dst->anon_vma) {
		int ret;

		vma_assert_write_locked(dst);
		dst->anon_vma = src->anon_vma;
		ret = anon_vma_clone(dst, src);
		if (ret)
			return ret;

		*dup = dst;
	}

	return 0;
}

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mt_validate(&mm->mm_mt);
	for_each_vma(vmi, vma) {
#ifdef CONFIG_DEBUG_VM_RB
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;
#endif
		unsigned long vmi_start, vmi_end;
		bool warn = 0;

		vmi_start = vma_iter_addr(&vmi);
		vmi_end = vma_iter_end(&vmi);
		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
			warn = 1;

		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
			warn = 1;

		if (warn) {
			pr_emerg("issue in %s\n", current->comm);
			dump_stack();
			dump_vma(vma);
			pr_emerg("tree range: %px start %lx end %lx\n", vma,
				 vmi_start, vmi_end - 1);
			vma_iter_dump_tree(&vmi);
		}

#ifdef CONFIG_DEBUG_VM_RB
		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}
#endif
		i++;
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
/*
 * vma_expand - Expand an existing VMA
 *
 * @vmg: Describes a VMA expansion operation.
 *
 * Expand @vma to vmg->start and vmg->end.  Can expand off the start and end.
 * Will expand over vmg->next if it's different from vmg->vma and vmg->end ==
 * vmg->next->vm_end.  Checking if the vmg->vma can expand and merge with
 * vmg->next needs to be handled by the caller.
 *
 * Returns: 0 on success
 */
int vma_expand(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *anon_dup = NULL;
	bool remove_next = false;
	struct vm_area_struct *vma = vmg->vma;
	struct vm_area_struct *next = vmg->next;
	struct vma_prepare vp;

	vma_start_write(vma);
	if (next && (vma != next) && (vmg->end == next->vm_end)) {
		int ret;

		remove_next = true;
		vma_start_write(next);
		ret = dup_anon_vma(vma, next, &anon_dup);
		if (ret)
			return ret;
	}

	init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
	/* Not merging but overwriting any part of next is not handled. */
	VM_WARN_ON(next && !vp.remove &&
		   next != vma && vmg->end > next->vm_start);
	/* Only handles expanding */
	VM_WARN_ON(vma->vm_start < vmg->start || vma->vm_end > vmg->end);

	/* Note: vma iterator must be pointing to 'start' */
	vma_iter_config(vmg->vmi, vmg->start, vmg->end);
	if (vma_iter_prealloc(vmg->vmi, vma))
		goto nomem;

	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, vmg->start, vmg->end, 0);
	vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff);
	vma_iter_store(vmg->vmi, vma);

	vma_complete(&vp, vmg->vmi, vma->vm_mm);
	return 0;

nomem:
	if (anon_dup)
		unlink_anon_vmas(anon_dup);
	return -ENOMEM;
}

/*
 * vma_shrink() - Reduce an existing VMA's memory area
 * @vmi: The vma iterator
 * @vma: The VMA to modify
 * @start: The new start
 * @end: The new end
 * @pgoff: The page offset of @vma matching the new start
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff)
{
	struct vma_prepare vp;

	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));

	if (vma->vm_start < start)
		vma_iter_config(vmi, vma->vm_start, start);
	else
		vma_iter_config(vmi, end, vma->vm_end);

	if (vma_iter_prealloc(vmi, NULL))
		return -ENOMEM;

	vma_start_write(vma);

	init_vma_prep(&vp, vma);
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, start, end, 0);

	vma_iter_clear(vmi);
	vma_set_range(vma, start, end, pgoff);
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);
	return 0;
}
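/*
 * Illustrative sketch (not part of this file): a caller shrinking a VMA
 * from below must recompute the page offset for the new start itself,
 * e.g. to drop the first two pages of a mapping:
 *
 *	unsigned long new_start = vma->vm_start + 2 * PAGE_SIZE;
 *	pgoff_t new_pgoff = vma->vm_pgoff +
 *			((new_start - vma->vm_start) >> PAGE_SHIFT);
 *
 *	err = vma_shrink(vmi, vma, new_start, vma->vm_end, new_pgoff);
 *
 * Note that vma_shrink() only adjusts the tree and the VMA itself;
 * unmapping any populated pages in the dropped range is handled
 * separately by its callers.
 */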
/*
 * vma_complete() - Helper function for handling the unlocking after altering
 * VMAs, or for inserting a VMA.
 *
 * @vp: The vma_prepare struct
 * @vmi: The vma iterator
 * @mm: The mm_struct
 */
void vma_complete(struct vma_prepare *vp,
		  struct vma_iterator *vmi, struct mm_struct *mm)
{
	if (vp->file) {
		if (vp->adj_next)
			vma_interval_tree_insert(vp->adj_next,
						 &vp->mapping->i_mmap);
		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
		flush_dcache_mmap_unlock(vp->mapping);
	}

	if (vp->remove && vp->file) {
		__remove_shared_vm_struct(vp->remove, vp->mapping);
		if (vp->remove2)
			__remove_shared_vm_struct(vp->remove2, vp->mapping);
	} else if (vp->insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		vma_iter_store(vmi, vp->insert);
		mm->map_count++;
	}

	if (vp->anon_vma) {
		anon_vma_interval_tree_post_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_post_update_vma(vp->adj_next);
		anon_vma_unlock_write(vp->anon_vma);
	}

	if (vp->file) {
		i_mmap_unlock_write(vp->mapping);
		uprobe_mmap(vp->vma);

		if (vp->adj_next)
			uprobe_mmap(vp->adj_next);
	}

	if (vp->remove) {
again:
		vma_mark_detached(vp->remove, true);
		if (vp->file) {
			uprobe_munmap(vp->remove, vp->remove->vm_start,
				      vp->remove->vm_end);
			fput(vp->file);
		}
		if (vp->remove->anon_vma)
			anon_vma_merge(vp->vma, vp->remove);
		mm->map_count--;
		mpol_put(vma_policy(vp->remove));
		if (!vp->remove2)
			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
		vm_area_free(vp->remove);

		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we are removing both mid and next vmas.
		 */
		if (vp->remove2) {
			vp->remove = vp->remove2;
			vp->remove2 = NULL;
			goto again;
		}
	}
	if (vp->insert && vp->file)
		uprobe_mmap(vp->insert);
}

static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach, bool mm_wr_locked)
{
	struct mmu_gather tlb;

	if (!vms->clear_ptes) /* Nothing to do */
		return;

	/*
	 * We can free page tables without write-locking mmap_lock because
	 * VMAs were isolated before we downgraded mmap_lock.
	 */
	mas_set(mas_detach, 1);
	lru_add_drain();
	tlb_gather_mmu(&tlb, vms->vma->vm_mm);
	update_hiwater_rss(vms->vma->vm_mm);
	unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
		   vms->vma_count, mm_wr_locked);

	mas_set(mas_detach, 1);
	/* start and end may be different if there is no prev or next vma. */
	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
		      vms->unmap_end, mm_wr_locked);
	tlb_finish_mmu(&tlb);
	vms->clear_ptes = false;
}

void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	if (!vms->nr_pages)
		return;

	vms_clear_ptes(vms, mas_detach, true);
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		if (vma->vm_ops && vma->vm_ops->close)
			vma->vm_ops->close(vma);
	vms->closed_vm_ops = true;
}
/*
 * vms_complete_munmap_vmas() - Finish the munmap() operation
 * @vms: The vma munmap struct
 * @mas_detach: The maple state of the detached vmas
 *
 * This updates the mm_struct, unmaps the region, frees the resources used
 * for the munmap() and may downgrade the lock - if requested.  It covers
 * everything that needs to be done once the vma maple tree has been updated.
 */
void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	mm = current->mm;
	mm->map_count -= vms->vma_count;
	mm->locked_vm -= vms->locked_vm;
	if (vms->unlock)
		mmap_write_downgrade(mm);

	if (!vms->nr_pages)
		return;

	vms_clear_ptes(vms, mas_detach, !vms->unlock);
	/* Update high watermark before we lower total_vm */
	update_hiwater_vm(mm);
	/* Stat accounting */
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
	/* Paranoid bookkeeping */
	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
	VM_WARN_ON(vms->data_vm > mm->data_vm);
	mm->exec_vm -= vms->exec_vm;
	mm->stack_vm -= vms->stack_vm;
	mm->data_vm -= vms->data_vm;

	/* Remove and clean up vmas */
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		remove_vma(vma, /* unreachable = */ false, vms->closed_vm_ops);

	vm_unacct_memory(vms->nr_accounted);
	validate_mm(mm);
	if (vms->unlock)
		mmap_read_unlock(mm);

	__mt_destroy(mas_detach->tree);
}
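/*
 * Illustrative sketch (not part of this file) of the locking flow when a
 * caller requests vms->unlock: the write lock is downgraded as soon as the
 * maple tree no longer exposes the detached VMAs, so the expensive PTE
 * teardown runs under the read lock only:
 *
 *	mmap_write_lock(mm);
 *	// ... gather VMAs, update the tree ...
 *	vms_complete_munmap_vmas(&vms, &mas_detach);
 *	// with vms.unlock == true, the function did:
 *	//	mmap_write_downgrade(mm);	// write -> read
 *	//	... unmap pages, free page tables ...
 *	//	mmap_read_unlock(mm);		// fully released
 */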
/*
 * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
 * for removal at a later date.  Handles splitting first and last if necessary
 * and marking the vmas as isolated.
 *
 * @vms: The vma munmap struct
 * @mas_detach: The maple state tracking the detached tree
 *
 * Return: 0 on success, -EPERM on mseal vmas, -ENOMEM otherwise
 */
int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *next = NULL;
	int error = -ENOMEM;

	/*
	 * If we need to split any vma, do it now to save pain later.
	 * Does it split the first one?
	 */
	if (vms->start > vms->vma->vm_start) {

		/*
		 * Make sure that map_count on return from munmap() will
		 * not exceed its limit; but let map_count go just above
		 * its limit temporarily, to help free resources as expected.
		 */
		if (vms->end < vms->vma->vm_end &&
		    vms->vma->vm_mm->map_count >= sysctl_max_map_count)
			goto map_count_exceeded;

		/* Don't bother splitting the VMA if we can't unmap it anyway */
		if (!can_modify_vma(vms->vma)) {
			error = -EPERM;
			goto start_split_failed;
		}

		if (__split_vma(vms->vmi, vms->vma, vms->start, 1))
			goto start_split_failed;
	}
	vms->prev = vma_prev(vms->vmi);
	if (vms->prev)
		vms->unmap_start = vms->prev->vm_end;

	/*
	 * Detach a range of VMAs from the mm.  Using next as a temp variable
	 * as it is always overwritten.
	 */
	for_each_vma_range(*(vms->vmi), next, vms->end) {
		long nrpages;

		if (!can_modify_vma(next)) {
			error = -EPERM;
			goto modify_vma_failed;
		}
		/* Does it split the end? */
		if (next->vm_end > vms->end) {
			if (__split_vma(vms->vmi, next, vms->end, 0))
				goto end_split_failed;
		}
		vma_start_write(next);
		mas_set(mas_detach, vms->vma_count++);
		if (mas_store_gfp(mas_detach, next, GFP_KERNEL))
			goto munmap_gather_failed;

		vma_mark_detached(next, true);
		nrpages = vma_pages(next);

		vms->nr_pages += nrpages;
		if (next->vm_flags & VM_LOCKED)
			vms->locked_vm += nrpages;

		if (next->vm_flags & VM_ACCOUNT)
			vms->nr_accounted += nrpages;

		if (is_exec_mapping(next->vm_flags))
			vms->exec_vm += nrpages;
		else if (is_stack_mapping(next->vm_flags))
			vms->stack_vm += nrpages;
		else if (is_data_mapping(next->vm_flags))
			vms->data_vm += nrpages;

		if (unlikely(vms->uf)) {
			/*
			 * If userfaultfd_unmap_prep returns an error, the vmas
			 * will remain split, but userland will get a highly
			 * unexpected error anyway.  This is no different than
			 * the case where the first of the two __split_vma
			 * calls fails, where we don't undo the first split
			 * even though we could.  This failure is unlikely
			 * enough that it's not worth optimizing for.
			 */
			if (userfaultfd_unmap_prep(next, vms->start, vms->end,
						   vms->uf))
				goto userfaultfd_error;
		}
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
		BUG_ON(next->vm_start < vms->start);
		BUG_ON(next->vm_start > vms->end);
#endif
	}

	vms->next = vma_next(vms->vmi);
	if (vms->next)
		vms->unmap_end = vms->next->vm_start;

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	/* Make sure no VMAs are about to be lost. */
	{
		MA_STATE(test, mas_detach->tree, 0, 0);
		struct vm_area_struct *vma_mas, *vma_test;
		int test_count = 0;

		vma_iter_set(vms->vmi, vms->start);
		rcu_read_lock();
		vma_test = mas_find(&test, vms->vma_count - 1);
		for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
			BUG_ON(vma_mas != vma_test);
			test_count++;
			vma_test = mas_next(&test, vms->vma_count - 1);
		}
		rcu_read_unlock();
		BUG_ON(vms->vma_count != test_count);
	}
#endif

	while (vma_iter_addr(vms->vmi) > vms->start)
		vma_iter_prev_range(vms->vmi);

	vms->clear_ptes = true;
	return 0;

userfaultfd_error:
munmap_gather_failed:
end_split_failed:
modify_vma_failed:
	reattach_vmas(mas_detach);
start_split_failed:
map_count_exceeded:
	return error;
}
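/*
 * Illustrative sketch (not part of this file): gather and complete run as a
 * pair, with the shared maple tree update in between.  do_vmi_align_munmap()
 * below is the canonical caller:
 *
 *	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
 *	error = vms_gather_munmap_vmas(&vms, &mas_detach);
 *	if (error)
 *		return error;	// gather already reattached the VMAs
 *	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
 *	if (error) {
 *		reattach_vmas(&mas_detach);
 *		return error;
 *	}
 *	vms_complete_munmap_vmas(&vms, &mas_detach);	// point of no return
 */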
/*
 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
 * @vmi: The vma iterator
 * @vma: The starting vm_area_struct
 * @mm: The mm_struct
 * @start: The aligned start address to munmap.
 * @end: The aligned end address to munmap.
 * @uf: The userfaultfd list_head
 * @unlock: Set to true to drop the mmap_lock.  Unlocking only happens on
 * success.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		struct mm_struct *mm, unsigned long start, unsigned long end,
		struct list_head *uf, bool unlock)
{
	struct maple_tree mt_detach;
	MA_STATE(mas_detach, &mt_detach, 0, 0);
	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
	mt_on_stack(mt_detach);
	struct vma_munmap_struct vms;
	int error;

	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
	error = vms_gather_munmap_vmas(&vms, &mas_detach);
	if (error)
		goto gather_failed;

	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
	if (error)
		goto clear_tree_failed;

	/* Point of no return */
	vms_complete_munmap_vmas(&vms, &mas_detach);
	return 0;

clear_tree_failed:
	reattach_vmas(&mas_detach);
gather_failed:
	validate_mm(mm);
	return error;
}

/*
 * do_vmi_munmap() - munmap a given range.
 * @vmi: The vma iterator
 * @mm: The mm_struct
 * @start: The start address to munmap
 * @len: The length of the range to munmap
 * @uf: The userfaultfd list_head
 * @unlock: set to true if the user wants to drop the mmap_lock on success
 *
 * This function takes a @vmi that is either pointing to the previous VMA or
 * set to MA_START and sets it up to remove the mapping(s).  The @len will be
 * aligned.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock)
{
	unsigned long end;
	struct vm_area_struct *vma;

	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE - start)
		return -EINVAL;

	end = start + PAGE_ALIGN(len);
	if (end == start)
		return -EINVAL;

	/* Find the first overlapping VMA */
	vma = vma_find(vmi, end);
	if (!vma) {
		if (unlock)
			mmap_write_unlock(mm);
		return 0;
	}

	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
}

/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
 * figure out whether that can be merged with its predecessor or its
 * successor.  Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where **** is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
 * at the same address as **** and is of the same or larger span, and
 * NNNN the next vma after ****:
 *
 *     ****             ****                   ****
 *    PPPPPPNNNNNN    PPPPPPNNNNNN       PPPPPPCCCCCC
 *    cannot merge    might become       might become
 *                    PPNNNNNNNNNN       PPPPPPPPPPCC
 *    mmap, brk or    case 4 below       case 5 below
 *    mremap move:
 *                        ****               ****
 *                    PPPP    NNNN       PPPPCCCCNNNN
 *                    might become       might become
 *                    PPPPPPPPPPPP 1 or  PPPPPPPPPPPP 6 or
 *                    PPPPPPPPNNNN 2 or  PPPPPPPPNNNN 7 or
 *                    PPPPNNNNNNNN 3     PPPPNNNNNNNN 8
 *
 * It is important for case 8 that the vma CCCC overlapping the
 * region **** is never going to be extended over NNNN.  Instead NNNN must
 * be extended in region **** and CCCC must be removed.  This way in
 * all cases where vma_merge succeeds, the moment vma_merge drops the
 * rmap_locks, the properties of the merged vma will be already
 * correct for the whole merged range.  Some of those properties like
 * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
 * be correct for the whole merged range immediately after the
 * rmap_locks are released.  Otherwise if NNNN would be removed and
 * CCCC would be extended over the NNNN range, remove_migration_ptes
 * or other rmap walkers (if working on addresses beyond the "end"
 * parameter) may establish ptes with the wrong permissions of CCCC
 * instead of the right permissions of NNNN.
 *
 * In the code below:
 * PPPP is represented by *prev
 * CCCC is represented by *curr or not represented at all (NULL)
 * NNNN is represented by *next or not represented at all (NULL)
 * **** is not represented - it will be merged and the vma containing the
 *      area is returned, or the function will return NULL
 */
static struct vm_area_struct *vma_merge(struct vma_merge_struct *vmg)
{
	struct mm_struct *mm = vmg->mm;
	struct vm_area_struct *prev = vmg->prev;
	struct vm_area_struct *curr, *next, *res;
	struct vm_area_struct *vma, *adjust, *remove, *remove2;
	struct vm_area_struct *anon_dup = NULL;
	struct vma_prepare vp;
	pgoff_t vma_pgoff;
	int err = 0;
	bool merge_prev = false;
	bool merge_next = false;
	bool vma_expanded = false;
	unsigned long addr = vmg->start;
	unsigned long end = vmg->end;
	unsigned long vma_start = addr;
	unsigned long vma_end = end;
	pgoff_t pglen = PHYS_PFN(end - addr);
	long adj_start = 0;

	/*
	 * We later require that vma->vm_flags == vm_flags,
	 * so this tests vma->vm_flags & VM_SPECIAL, too.
	 */
	if (vmg->flags & VM_SPECIAL)
		return NULL;

	/* Does the input range span an existing VMA? (cases 5 - 8) */
	curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);

	if (!curr ||			/* cases 1 - 4 */
	    end == curr->vm_end)	/* cases 6 - 8, adjacent VMA */
		next = vmg->next = vma_lookup(mm, end);
	else
		next = vmg->next = NULL;	/* case 5 */

	if (prev) {
		vma_start = prev->vm_start;
		vma_pgoff = prev->vm_pgoff;

		/* Can we merge the predecessor? */
		if (addr == prev->vm_end && can_vma_merge_after(vmg)) {
			merge_prev = true;
			vma_prev(vmg->vmi);
		}
	}
	/* Can we merge the successor? */
	if (next && can_vma_merge_before(vmg))
		merge_next = true;

	/* Verify some invariants that must be enforced by the caller. */
	VM_WARN_ON(prev && addr <= prev->vm_start);
	VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
	VM_WARN_ON(addr >= end);

	if (!merge_prev && !merge_next)
		return NULL; /* Not mergeable. */

	if (merge_prev)
		vma_start_write(prev);

	res = vma = prev;
	remove = remove2 = adjust = NULL;

	/* Can we merge both the predecessor and the successor? */
	if (merge_prev && merge_next &&
	    is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
		vma_start_write(next);
		remove = next;				/* case 1 */
		vma_end = next->vm_end;
		err = dup_anon_vma(prev, next, &anon_dup);
		if (curr) {				/* case 6 */
			vma_start_write(curr);
			remove = curr;
			remove2 = next;
			/*
			 * Note that the dup_anon_vma below cannot overwrite err
			 * since the first caller would do nothing unless next
			 * has an anon_vma.
			 */
			if (!next->anon_vma)
				err = dup_anon_vma(prev, curr, &anon_dup);
		}
	} else if (merge_prev) {			/* case 2 */
		if (curr) {
			vma_start_write(curr);
			if (end == curr->vm_end) {	/* case 7 */
				/*
				 * can_vma_merge_after() assumed we would not be
				 * removing prev vma, so it skipped the check
				 * for vm_ops->close, but we are removing curr.
				 */
				if (curr->vm_ops && curr->vm_ops->close)
					err = -EINVAL;
				remove = curr;
			} else {			/* case 5 */
				adjust = curr;
				adj_start = (end - curr->vm_start);
			}
			if (!err)
				err = dup_anon_vma(prev, curr, &anon_dup);
		}
	} else { /* merge_next */
		vma_start_write(next);
		res = next;
		if (prev && addr < prev->vm_end) {	/* case 4 */
			vma_start_write(prev);
			vma_end = addr;
			adjust = next;
			adj_start = -(prev->vm_end - addr);
			err = dup_anon_vma(next, prev, &anon_dup);
		} else {
			/*
			 * Note that cases 3 and 8 are the ONLY ones where prev
			 * is permitted to be (but is not necessarily) NULL.
			 */
			vma = next;			/* case 3 */
			vma_start = addr;
			vma_end = next->vm_end;
			vma_pgoff = next->vm_pgoff - pglen;
			if (curr) {			/* case 8 */
				vma_pgoff = curr->vm_pgoff;
				vma_start_write(curr);
				remove = curr;
				err = dup_anon_vma(next, curr, &anon_dup);
			}
		}
	}
	/* Error in anon_vma clone. */
	if (err)
		goto anon_vma_fail;

	if (vma_start < vma->vm_start || vma_end > vma->vm_end)
		vma_expanded = true;

	if (vma_expanded) {
		vma_iter_config(vmg->vmi, vma_start, vma_end);
	} else {
		vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
				adjust->vm_end);
	}

	if (vma_iter_prealloc(vmg->vmi, vma))
		goto prealloc_fail;

	init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
		   vp.anon_vma != adjust->anon_vma);

	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
	vma_set_range(vma, vma_start, vma_end, vma_pgoff);

	if (vma_expanded)
		vma_iter_store(vmg->vmi, vma);

	if (adj_start) {
		adjust->vm_start += adj_start;
		adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
		if (adj_start < 0) {
			WARN_ON(vma_expanded);
			vma_iter_store(vmg->vmi, next);
		}
	}

	vma_complete(&vp, vmg->vmi, mm);
	validate_mm(mm);
	khugepaged_enter_vma(res, vmg->flags);
	return res;

prealloc_fail:
	if (anon_dup)
		unlink_anon_vmas(anon_dup);

anon_vma_fail:
	vma_iter_set(vmg->vmi, addr);
	vma_iter_load(vmg->vmi);
	return NULL;
}
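/*
 * Illustrative sketch (not part of this file) of case 1 above: mprotect
 * makes the middle range's flags match both neighbours, so prev absorbs
 * everything and next is removed.
 *
 *	before:  PPPP****NNNN   prev [0, 4K), **** [4K, 8K), next [8K, 12K)
 *	after:   PPPPPPPPPPPP   prev [0, 12K), one VMA, one set of flags
 *
 * The rmap-visible properties of the merged VMA are correct for the whole
 * range by the time the rmap locks are dropped, which is what the long
 * comment before vma_merge() insists on.
 */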
/*
 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
 * context and anonymous VMA name within the range [start, end).
 *
 * As a result, we might be able to merge the newly modified VMA range with an
 * adjacent VMA with identical properties.
 *
 * If no merge is possible and the range does not span the entirety of the VMA,
 * we then need to split the VMA to accommodate the change.
 *
 * The function returns either the merged VMA, the original VMA if a split was
 * required instead, or an error if the split failed.
 */
static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *vma = vmg->vma;
	struct vm_area_struct *merged;

	/* First, try to merge. */
	merged = vma_merge(vmg);
	if (merged)
		return merged;

	/* Split any preceding portion of the VMA. */
	if (vma->vm_start < vmg->start) {
		int err = split_vma(vmg->vmi, vma, vmg->start, 1);

		if (err)
			return ERR_PTR(err);
	}

	/* Split any trailing portion of the VMA. */
	if (vma->vm_end > vmg->end) {
		int err = split_vma(vmg->vmi, vma, vmg->end, 0);

		if (err)
			return ERR_PTR(err);
	}

	return vma;
}

struct vm_area_struct *vma_modify_flags(
		struct vma_iterator *vmi, struct vm_area_struct *prev,
		struct vm_area_struct *vma, unsigned long start, unsigned long end,
		unsigned long new_flags)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;

	return vma_modify(&vmg);
}

struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;
	vmg.anon_name = new_name;

	return vma_modify(&vmg);
}

struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.policy = new_pol;

	return vma_modify(&vmg);
}

struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.flags = new_flags;
	vmg.uffd_ctx = new_ctx;

	return vma_modify(&vmg);
}

/*
 * Attempt to merge a newly mapped VMA with those adjacent to it.  The caller
 * must ensure that [start, end) does not overlap any existing VMA.
 */
struct vm_area_struct
*vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev,
		   struct vm_area_struct *vma, unsigned long start,
		   unsigned long end, pgoff_t pgoff)
{
	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);

	vmg.pgoff = pgoff;

	return vma_merge(&vmg);
}
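/*
 * Illustrative sketch (not part of this file) of how an mprotect-style
 * caller is expected to use the vma_modify_*() wrappers above:
 *
 *	vma = vma_modify_flags(&vmi, prev, vma, start, end, new_flags);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);	// a required split failed
 *	// On success, vma covers exactly [start, end): either a merged
 *	// neighbour absorbed the range, or the original VMA was split
 *	// down to it.  The caller then applies new_flags to vma.
 */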
/*
 * Expand vma by delta bytes, potentially merging with an immediately adjacent
 * VMA with identical properties.
 */
struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
					struct vm_area_struct *vma,
					unsigned long delta)
{
	VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);

	/* vma is specified as prev, so case 1 or 2 will apply. */
	return vma_merge(&vmg);
}

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
{
	vb->count = 0;
}

static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
{
	struct address_space *mapping;
	int i;

	mapping = vb->vmas[0]->vm_file->f_mapping;
	i_mmap_lock_write(mapping);
	for (i = 0; i < vb->count; i++) {
		VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
		__remove_shared_vm_struct(vb->vmas[i], mapping);
	}
	i_mmap_unlock_write(mapping);

	unlink_file_vma_batch_init(vb);
}

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma)
{
	if (vma->vm_file == NULL)
		return;

	if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
	    vb->count == ARRAY_SIZE(vb->vmas))
		unlink_file_vma_batch_process(vb);

	vb->vmas[vb->count] = vma;
	vb->count++;
}

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
{
	if (vb->count > 0)
		unlink_file_vma_batch_process(vb);
}

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;

		i_mmap_lock_write(mapping);
		__remove_shared_vm_struct(vma, mapping);
		i_mmap_unlock_write(mapping);
	}
}

void vma_link_file(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping;

	if (file) {
		mapping = file->f_mapping;
		i_mmap_lock_write(mapping);
		__vma_link_file(vma, mapping);
		i_mmap_unlock_write(mapping);
	}
}

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
{
	VMA_ITERATOR(vmi, mm, 0);

	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
	if (vma_iter_prealloc(&vmi, vma))
		return -ENOMEM;

	vma_start_write(vma);
	vma_iter_store(&vmi, vma);
	vma_link_file(vma);
	mm->map_count++;
	validate_mm(mm);
	return 0;
}
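/*
 * Illustrative sketch (not part of this file): the unlink_file_vma_batch_*()
 * helpers above amortize the i_mmap rwsem acquisition when tearing down many
 * VMAs backed by the same file.  A caller walking a set of VMAs (vmi here is
 * a hypothetical iterator) would do:
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for_each_vma(vmi, vma)
 *		unlink_file_vma_batch_add(&vb, vma);	// flushes when full
 *	unlink_file_vma_batch_final(&vb);		// flush the remainder
 *
 * The batch is also flushed whenever the file changes, so the lock is only
 * taken once per run of same-file VMAs.
 */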
/*
 * Copy the vma structure to a new location in the same mm,
 * prior to moving page table entries, to effect an mremap move.
 */
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks)
{
	struct vm_area_struct *vma = *vmap;
	unsigned long vma_start = vma->vm_start;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma, *prev;
	bool faulted_in_anon_vma = true;
	VMA_ITERATOR(vmi, mm, addr);

	/*
	 * If anonymous vma has not yet been faulted, update new pgoff
	 * to match new location, to increase its chance of merging.
	 */
	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
		pgoff = addr >> PAGE_SHIFT;
		faulted_in_anon_vma = false;
	}

	new_vma = find_vma_prev(mm, addr, &prev);
	if (new_vma && new_vma->vm_start < addr + len)
		return NULL;	/* should never get here */

	new_vma = vma_merge_new_vma(&vmi, prev, vma, addr, addr + len, pgoff);
	if (new_vma) {
		/*
		 * Source vma may have been merged into new_vma
		 */
		if (unlikely(vma_start >= new_vma->vm_start &&
			     vma_start < new_vma->vm_end)) {
			/*
			 * The only way we can get a vma_merge with
			 * self during an mremap is if the vma hasn't
			 * been faulted in yet and we were allowed to
			 * reset the dst vma->vm_pgoff to the
			 * destination address of the mremap to allow
			 * the merge to happen.  mremap must change the
			 * vm_pgoff linearity between src and dst vmas
			 * (in turn preventing a vma_merge) to be
			 * safe.  It is only safe to keep the vm_pgoff
			 * linear if there are no pages mapped yet.
			 */
			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
			*vmap = vma = new_vma;
		}
		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
	} else {
		new_vma = vm_area_dup(vma);
		if (!new_vma)
			goto out;
		vma_set_range(new_vma, addr, addr + len, pgoff);
		if (vma_dup_policy(vma, new_vma))
			goto out_free_vma;
		if (anon_vma_clone(new_vma, vma))
			goto out_free_mempol;
		if (new_vma->vm_file)
			get_file(new_vma->vm_file);
		if (new_vma->vm_ops && new_vma->vm_ops->open)
			new_vma->vm_ops->open(new_vma);
		if (vma_link(mm, new_vma))
			goto out_vma_link;
		*need_rmap_locks = false;
	}
	return new_vma;

out_vma_link:
	if (new_vma->vm_ops && new_vma->vm_ops->close)
		new_vma->vm_ops->close(new_vma);

	if (new_vma->vm_file)
		fput(new_vma->vm_file);

	unlink_anon_vmas(new_vma);
out_free_mempol:
	mpol_put(vma_policy(new_vma));
out_free_vma:
	vm_area_free(new_vma);
out:
	return NULL;
}

/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vmas.  For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting.  But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
	return a->vm_end == b->vm_start &&
		mpol_equal(vma_policy(a), vma_policy(b)) &&
		a->vm_file == b->vm_file &&
		!((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}

/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'.  The 'a'/'b' vmas are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mmap_lock held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_.
 * But that's ok: if it is being set up, that automatically means that it
 * will be a singleton acceptable for merging, so we can do all of this
 * optimistically.  But we do that READ_ONCE() to make sure that we never
 * re-load the pointer.
 *
 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vmas are compatible (adjacent,
 * and with the same memory policies).  That's all stable, even with just
 * a read lock on the mmap_lock.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
					  struct vm_area_struct *a,
					  struct vm_area_struct *b)
{
	if (anon_vma_compatible(a, b)) {
		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);

		if (anon_vma && list_is_singular(&old->anon_vma_chain))
			return anon_vma;
	}
	return NULL;
}

/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = NULL;
	struct vm_area_struct *prev, *next;
	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);

	/* Try next first. */
	next = vma_iter_load(&vmi);
	if (next) {
		anon_vma = reusable_anon_vma(next, vma, next);
		if (anon_vma)
			return anon_vma;
	}

	prev = vma_prev(&vmi);
	VM_BUG_ON_VMA(prev != vma, vma);
	prev = vma_prev(&vmi);
	/* Now try prev. */
	if (prev)
		anon_vma = reusable_anon_vma(prev, prev, vma);

	/*
	 * We might reach here with anon_vma == NULL if we can't find
	 * any reusable anon_vma.
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return anon_vma;
}

static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
{
	return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
}

static bool vma_is_shared_writable(struct vm_area_struct *vma)
{
	return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
		(VM_WRITE | VM_SHARED);
}

static bool vma_fs_can_writeback(struct vm_area_struct *vma)
{
	/* No managed pages to writeback. */
	if (vma->vm_flags & VM_PFNMAP)
		return false;

	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_can_writeback(vma->vm_file->f_mapping);
}
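/*
 * Illustrative sketch (not part of this file): vma_is_shared_writable()
 * above is a strict mask-and-compare, so a MAP_SHARED mapping opened
 * read-only does not count as shared-writable:
 *
 *	// VM_SHARED alone:      (flags & (VM_WRITE | VM_SHARED))
 *	//   == VM_SHARED        -> not shared-writable
 *	// VM_WRITE | VM_SHARED: -> shared-writable, and the dirty-tracking
 *	//   and writenotify questions below come into play
 */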
/*
 * Does this VMA require the underlying folios to have their dirty state
 * tracked?
 */
bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
{
	/* Only shared, writable VMAs require dirty tracking. */
	if (!vma_is_shared_writable(vma))
		return false;

	/* Does the filesystem need to be notified? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/*
	 * Even if the filesystem doesn't indicate a need for writenotify, if
	 * it can writeback, dirty tracking is still required.
	 */
	return vma_fs_can_writeback(vma);
}

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events.  If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
	/* If it was private or non-writable, the write bit is already clear */
	if (!vma_is_shared_writable(vma))
		return false;

	/* The backer wishes to know when pages are first written to? */
	if (vm_ops_needs_writenotify(vma->vm_ops))
		return true;

	/* The open routine did something to the protections that pgprot_modify
	 * won't preserve? */
	if (pgprot_val(vm_page_prot) !=
	    pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
		return false;

	/*
	 * Do we need to track softdirty? hugetlb does not support softdirty
	 * tracking yet.
	 */
	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
		return true;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_wp(vma))
		return true;

	/* Can the mapping track the dirty pages? */
	return vma_fs_can_writeback(vma);
}

static DEFINE_MUTEX(mm_all_locks_mutex);

static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
		/*
		 * We can safely modify head.next after taking the
		 * anon_vma->root->rwsem.  If some other vma in this mm shares
		 * the same anon_vma we won't take it again.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us thanks to the
		 * anon_vma->root->rwsem.
		 */
		if (__test_and_set_bit(0, (unsigned long *)
				       &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
	}
}

static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change from under us because
		 * we hold the mm_all_locks_mutex.
		 *
		 * Operations on ->flags have to be atomic because
		 * even if AS_MM_ALL_LOCKS is stable thanks to the
		 * mm_all_locks_mutex, there may be other cpus
		 * changing other bitflags in parallel to us.
		 */
		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
			BUG();
		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
	}
}
/*
 * This operation locks against the VM for all pte/vma/mm related
 * operations that could ever happen on a certain mm.  This includes
 * vmtruncate, try_to_unmap, and all page faults.
 *
 * The caller must take the mmap_lock in write mode before calling
 * mm_take_all_locks().  The caller isn't allowed to release the
 * mmap_lock until mm_drop_all_locks() returns.
 *
 * mmap_lock in write mode is required in order to block all operations
 * that could modify pagetables and free pages without need of
 * altering the vma layout.  It's also needed in write mode to avoid new
 * anon_vmas being associated with existing vmas.
 *
 * A single task can't take more than one mm_take_all_locks() in a row
 * or it would deadlock.
 *
 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
 * mapping->flags avoid taking the same lock twice, if more than one
 * vma in this mm is backed by the same anon_vma or address_space.
 *
 * We take locks in the following order, according to the comment at the
 * beginning of mm/rmap.c:
 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
 *   hugetlb mapping);
 * - all vmas marked locked
 * - all i_mmap_rwsem locks;
 * - all anon_vma->rwsem
 *
 * We can take all locks within these types randomly because the VM code
 * doesn't nest them and we're protected from parallel mm_take_all_locks() by
 * mm_all_locks_mutex.
 *
 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
 * that may have to take thousands of locks.
 *
 * mm_take_all_locks() can fail if it's interrupted by signals.
 */
int mm_take_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_assert_write_locked(mm);

	mutex_lock(&mm_all_locks_mutex);

	/*
	 * vma_start_write() does not have a complement in
	 * mm_drop_all_locks() because vma_start_write() is always
	 * asymmetrical; it marks a VMA as being written to until
	 * mmap_write_unlock() or mmap_write_downgrade() is reached.
	 */
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		vma_start_write(vma);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->vm_file && vma->vm_file->f_mapping &&
				!is_vm_hugetlb_page(vma))
			vm_lock_mapping(mm, vma->vm_file->f_mapping);
	}

	vma_iter_init(&vmi, mm, 0);
	for_each_vma(vmi, vma) {
		if (signal_pending(current))
			goto out_unlock;
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_lock_anon_vma(mm, avc->anon_vma);
	}

	return 0;

out_unlock:
	mm_drop_all_locks(mm);
	return -EINTR;
}
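/*
 * Illustrative sketch (not part of this file) of the required pairing; the
 * mmap_lock must be held in write mode across the whole critical section:
 *
 *	mmap_write_lock(mm);
 *	if (mm_take_all_locks(mm)) {
 *		mmap_write_unlock(mm);
 *		return -EINTR;		// interrupted by a signal
 *	}
 *	// ... operate with every rmap lock in the mm held ...
 *	mm_drop_all_locks(mm);
 *	mmap_write_unlock(mm);		// only after drop returns
 */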
static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
		/*
		 * The LSB of head.next can't change to 0 from under
		 * us because we hold the mm_all_locks_mutex.
		 *
		 * We must however clear the bitflag before unlocking
		 * the vma so the users using the anon_vma->rb_root will
		 * never see our bitflag.
		 *
		 * No need of atomic instructions here, head.next
		 * can't change from under us until we release the
		 * anon_vma->root->rwsem.
		 */
		if (!__test_and_clear_bit(0, (unsigned long *)
					  &anon_vma->root->rb_root.rb_root.rb_node))
			BUG();
		anon_vma_unlock_write(anon_vma);
	}
}

static void vm_unlock_mapping(struct address_space *mapping)
{
	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
		/*
		 * AS_MM_ALL_LOCKS can't change to 0 from under us
		 * because we hold the mm_all_locks_mutex.
		 */
		i_mmap_unlock_write(mapping);
		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
					&mapping->flags))
			BUG();
	}
}

/*
 * The mmap_lock cannot be released by the caller until
 * mm_drop_all_locks() returns.
 */
void mm_drop_all_locks(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct anon_vma_chain *avc;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_assert_write_locked(mm);
	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));

	for_each_vma(vmi, vma) {
		if (vma->anon_vma)
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				vm_unlock_anon_vma(avc->anon_vma);
		if (vma->vm_file && vma->vm_file->f_mapping)
			vm_unlock_mapping(vma->vm_file->f_mapping);
	}

	mutex_unlock(&mm_all_locks_mutex);
}