--- vma.c (94f59ea591f17d5fb77f68e820b27522596a7e9e)
+++ vma.c (f8d112a4e657c65c888e6b8a8435ef61a66e4ab8)
 // SPDX-License-Identifier: GPL-2.0-or-later

 /*
  * VMA-specific functions.
  */

 #include "vma_internal.h"
 #include "vma.h"

--- 122 unchanged lines hidden (view full) ---

                 return true;
         }
         return false;
 }

 /*
  * Close a vm structure and free it.
  */
-void remove_vma(struct vm_area_struct *vma, bool unreachable)
+void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed)
 {
         might_sleep();
-        if (vma->vm_ops && vma->vm_ops->close)
+        if (!closed && vma->vm_ops && vma->vm_ops->close)
                 vma->vm_ops->close(vma);
         if (vma->vm_file)
                 fput(vma->vm_file);
         mpol_put(vma_policy(vma));
         if (unreachable)
                 __vm_area_free(vma);
         else
                 vm_area_free(vma);
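
Note on the remove_vma() hunk above: the new "closed" argument lets a
caller that has already run the vm_ops->close() hooks (the new
vms_clean_up_area() below does exactly that) tell remove_vma() to skip
them, since close() is not guaranteed to be safe to call twice. A
minimal userspace model of the guard; the toy_* names and the call
counter are invented for illustration, not kernel API:

#include <stdbool.h>
#include <stdio.h>

struct toy_vma {
        int close_calls;                /* times the "close hook" ran */
};

static void toy_close(struct toy_vma *vma)
{
        vma->close_calls++;
}

/* Mirrors: if (!closed && vma->vm_ops && vma->vm_ops->close) */
static void toy_remove_vma(struct toy_vma *vma, bool closed)
{
        if (!closed)
                toy_close(vma);
}

int main(void)
{
        struct toy_vma vma = { 0 };

        toy_close(&vma);                /* early close, as vms_clean_up_area() does */
        toy_remove_vma(&vma, true);     /* closed == true: hook not run again */
        printf("close ran %d time(s)\n", vma.close_calls);     /* 1 */
        return 0;
}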

--- 365 unchanged lines hidden (view full) ---

                 goto nomem;

         vma_prepare(&vp);
         vma_adjust_trans_huge(vma, start, end, 0);
         vma_set_range(vma, start, end, pgoff);
         vma_iter_store(vmi, vma);

         vma_complete(&vp, vmi, vma->vm_mm);
-        validate_mm(vma->vm_mm);
         return 0;

 nomem:
         if (anon_dup)
                 unlink_anon_vmas(anon_dup);
         return -ENOMEM;
 }


--- 107 unchanged lines hidden (view full) ---

                         vp->remove2 = NULL;
                         goto again;
                 }
         }
         if (vp->insert && vp->file)
                 uprobe_mmap(vp->insert);
 }

-static void vms_complete_pte_clear(struct vma_munmap_struct *vms,
-                struct ma_state *mas_detach, bool mm_wr_locked)
+static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
+                struct ma_state *mas_detach, bool mm_wr_locked)
 {
         struct mmu_gather tlb;

+        if (!vms->clear_ptes) /* Nothing to do */
+                return;
+
         /*
          * We can free page tables without write-locking mmap_lock because VMAs
          * were isolated before we downgraded mmap_lock.
          */
         mas_set(mas_detach, 1);
         lru_add_drain();
         tlb_gather_mmu(&tlb, vms->mm);
         update_hiwater_rss(vms->mm);
-        unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end, vms->vma_count, mm_wr_locked);
+        unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
+                   vms->vma_count, mm_wr_locked);
+
         mas_set(mas_detach, 1);
         /* start and end may be different if there is no prev or next vma. */
-        free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start, vms->unmap_end, mm_wr_locked);
+        free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
+                      vms->unmap_end, mm_wr_locked);
         tlb_finish_mmu(&tlb);
+        vms->clear_ptes = false;
 }

+void vms_clean_up_area(struct vma_munmap_struct *vms,
+                struct ma_state *mas_detach, bool mm_wr_locked)
+{
+        struct vm_area_struct *vma;
+
+        if (!vms->nr_pages)
+                return;
+
+        vms_clear_ptes(vms, mas_detach, mm_wr_locked);
+        mas_set(mas_detach, 0);
+        mas_for_each(mas_detach, vma, ULONG_MAX)
+                if (vma->vm_ops && vma->vm_ops->close)
+                        vma->vm_ops->close(vma);
+        vms->closed_vm_ops = true;
+}
+
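Note on the two functions above: the series threads two flags through
struct vma_munmap_struct. clear_ptes marks PTE teardown as still
pending; vms_clear_ptes() latches it off after running once, so it can
be called from either path without repeating the unmap. closed_vm_ops
records that vms_clean_up_area() already ran the close hooks, and is
later passed to remove_vma() by vms_complete_munmap_vmas(). A
compilable userspace sketch of that sequencing, with invented toy_*
names standing in for the kernel structures:

#include <stdbool.h>
#include <stdio.h>

struct toy_vms {
        bool clear_ptes;        /* PTE clearing still pending? */
        bool closed_vm_ops;     /* close hooks already run? */
};

static void toy_clear_ptes(struct toy_vms *vms)
{
        if (!vms->clear_ptes)   /* nothing to do */
                return;
        printf("clearing PTEs\n");
        vms->clear_ptes = false;        /* latch: runs at most once */
}

static void toy_clean_up_area(struct toy_vms *vms)
{
        toy_clear_ptes(vms);
        printf("running close hooks early\n");
        vms->closed_vm_ops = true;
}

static void toy_complete(struct toy_vms *vms)
{
        toy_clear_ptes(vms);            /* no-op if clean-up already ran */
        if (!vms->closed_vm_ops)
                printf("running close hooks\n");
        printf("freeing VMAs\n");
}

int main(void)
{
        struct toy_vms vms = { .clear_ptes = true };

        toy_clean_up_area(&vms);        /* optional early phase */
        toy_complete(&vms);             /* skips PTEs and close hooks */
        return 0;
}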
 /*
  * vms_complete_munmap_vmas() - Finish the munmap() operation
  * @vms: The vma munmap struct
  * @mas_detach: The maple state of the detached vmas
  *
  * This updates the mm_struct, unmaps the region, frees the resources
  * used for the munmap() and may downgrade the lock - if requested. Everything
  * needed to be done once the vma maple tree is updated.

--- 5 unchanged lines hidden (view full) ---

         struct mm_struct *mm;

         mm = vms->mm;
         mm->map_count -= vms->vma_count;
         mm->locked_vm -= vms->locked_vm;
         if (vms->unlock)
                 mmap_write_downgrade(mm);

-        vms_complete_pte_clear(vms, mas_detach, !vms->unlock);
+        if (!vms->nr_pages)
+                return;
+
+        vms_clear_ptes(vms, mas_detach, !vms->unlock);
         /* Update high watermark before we lower total_vm */
         update_hiwater_vm(mm);
         /* Stat accounting */
         WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
         /* Paranoid bookkeeping */
         VM_WARN_ON(vms->exec_vm > mm->exec_vm);
         VM_WARN_ON(vms->stack_vm > mm->stack_vm);
         VM_WARN_ON(vms->data_vm > mm->data_vm);
         mm->exec_vm -= vms->exec_vm;
         mm->stack_vm -= vms->stack_vm;
         mm->data_vm -= vms->data_vm;

         /* Remove and clean up vmas */
         mas_set(mas_detach, 0);
         mas_for_each(mas_detach, vma, ULONG_MAX)
-                remove_vma(vma, false);
+                remove_vma(vma, /* unreachable = */ false, vms->closed_vm_ops);

         vm_unacct_memory(vms->nr_accounted);
         validate_mm(mm);
         if (vms->unlock)
                 mmap_read_unlock(mm);

         __mt_destroy(mas_detach->tree);
 }

--- 127 unchanged lines hidden (view full) ---

                 rcu_read_unlock();
                 BUG_ON(vms->vma_count != test_count);
         }
 #endif

         while (vma_iter_addr(vms->vmi) > vms->start)
                 vma_iter_prev_range(vms->vmi);

+        vms->clear_ptes = true;
         return 0;

 userfaultfd_error:
 munmap_gather_failed:
 end_split_failed:
 modify_vma_failed:
-        abort_munmap_vmas(mas_detach);
+        abort_munmap_vmas(mas_detach, /* closed = */ false);
 start_split_failed:
 map_count_exceeded:
         return error;
 }

 /*
  * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
  * @vmi: The vma iterator

--- 28 unchanged lines hidden (view full) ---

         if (error)
                 goto clear_tree_failed;

         /* Point of no return */
         vms_complete_munmap_vmas(&vms, &mas_detach);
         return 0;

 clear_tree_failed:
-        abort_munmap_vmas(&mas_detach);
+        abort_munmap_vmas(&mas_detach, /* closed = */ false);
 gather_failed:
         validate_mm(mm);
         return error;
 }

 /*
  * do_vmi_munmap() - munmap a given range.
  * @vmi: The vma iterator

--- 701 unchanged lines hidden (view full) ---

         if (userfaultfd_wp(vma))
                 return true;

         /* Can the mapping track the dirty pages? */
         return vma_fs_can_writeback(vma);
 }

 unsigned long count_vma_pages_range(struct mm_struct *mm,
-                unsigned long addr, unsigned long end)
+                unsigned long addr, unsigned long end,
+                unsigned long *nr_accounted)
 {
         VMA_ITERATOR(vmi, mm, addr);
         struct vm_area_struct *vma;
         unsigned long nr_pages = 0;

+        *nr_accounted = 0;
         for_each_vma_range(vmi, vma, end) {
                 unsigned long vm_start = max(addr, vma->vm_start);
                 unsigned long vm_end = min(end, vma->vm_end);

                 nr_pages += PHYS_PFN(vm_end - vm_start);
+                if (vma->vm_flags & VM_ACCOUNT)
+                        *nr_accounted += PHYS_PFN(vm_end - vm_start);
         }

         return nr_pages;
 }
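
Note on the count_vma_pages_range() change above: each VMA is clamped
to [addr, end) and its pages counted; the new *nr_accounted
out-parameter additionally tallies the pages of VM_ACCOUNT mappings,
i.e. those charged against the overcommit accounting. A self-contained
userspace model of the logic, assuming 4 KiB pages for the page-frame
conversion; the toy_* names are illustrative, not kernel API:

#include <stdio.h>

#define PAGE_SHIFT      12
#define TOY_VM_ACCOUNT  0x1UL

struct toy_vma {
        unsigned long vm_start, vm_end, vm_flags;
};

static unsigned long toy_count_pages(const struct toy_vma *vmas, int n,
                unsigned long addr, unsigned long end,
                unsigned long *nr_accounted)
{
        unsigned long nr_pages = 0;

        *nr_accounted = 0;
        for (int i = 0; i < n; i++) {
                unsigned long vm_start = vmas[i].vm_start > addr ?
                                         vmas[i].vm_start : addr;
                unsigned long vm_end = vmas[i].vm_end < end ?
                                       vmas[i].vm_end : end;

                if (vm_start >= vm_end) /* no overlap with [addr, end) */
                        continue;
                nr_pages += (vm_end - vm_start) >> PAGE_SHIFT;
                if (vmas[i].vm_flags & TOY_VM_ACCOUNT)
                        *nr_accounted += (vm_end - vm_start) >> PAGE_SHIFT;
        }
        return nr_pages;
}

int main(void)
{
        struct toy_vma vmas[] = {
                { 0x1000, 0x5000, TOY_VM_ACCOUNT },     /* 4 pages, accounted */
                { 0x8000, 0xa000, 0 },                  /* 2 pages */
        };
        unsigned long acct;
        unsigned long total = toy_count_pages(vmas, 2, 0x0, 0x10000, &acct);

        printf("pages=%lu accounted=%lu\n", total, acct);      /* 6 and 4 */
        return 0;
}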

 static DEFINE_MUTEX(mm_all_locks_mutex);

 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)

--- 193 unchanged lines hidden ---