vma.c: d744f4acb81ae2f2c33bce71da1f65be32ed1d65 (old) -> 9c3ebeda8fb5a8e9e82ab9364ec3d4b80cd0ec3d (new)
// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * VMA-specific functions.
 */

#include "vma_internal.h"
#include "vma.h"

--- 632 unchanged lines hidden ---

			vp->remove2 = NULL;
			goto again;
		}
	}
	if (vp->insert && vp->file)
		uprobe_mmap(vp->insert);
}

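/*
 * Review note: the hunk below is new in 9c3ebeda. It replaces the
 * unmap_region() call in vms_complete_munmap_vmas() with a helper that
 * runs unmap_vmas() and free_pgtables() directly from the
 * vma_munmap_struct, using the unmap_start/unmap_end bounds recorded
 * while the VMAs were gathered (see the hunks further down).
 */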
+static void vms_complete_pte_clear(struct vma_munmap_struct *vms,
+		struct ma_state *mas_detach, bool mm_wr_locked)
+{
+	struct mmu_gather tlb;
+
+	/*
+	 * We can free page tables without write-locking mmap_lock because VMAs
+	 * were isolated before we downgraded mmap_lock.
+	 */
+	mas_set(mas_detach, 1);
+	lru_add_drain();
+	tlb_gather_mmu(&tlb, vms->mm);
+	update_hiwater_rss(vms->mm);
+	unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end, vms->vma_count, mm_wr_locked);
+	mas_set(mas_detach, 1);
+	/* start and end may be different if there is no prev or next vma. */
+	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start, vms->unmap_end, mm_wr_locked);
+	tlb_finish_mmu(&tlb);
+}
+
/*
 * vms_complete_munmap_vmas() - Finish the munmap() operation
 * @vms: The vma munmap struct
 * @mas_detach: The maple state of the detached vmas
 *
 * This updates the mm_struct, unmaps the region, frees the resources
 * used for the munmap() and may downgrade the lock - if requested. Everything
 * needed to be done once the vma maple tree is updated.

--- 5 unchanged lines hidden ---

	struct mm_struct *mm;

	mm = vms->mm;
	mm->map_count -= vms->vma_count;
	mm->locked_vm -= vms->locked_vm;
	if (vms->unlock)
		mmap_write_downgrade(mm);

-	/*
-	 * We can free page tables without write-locking mmap_lock because VMAs
-	 * were isolated before we downgraded mmap_lock.
-	 */
-	mas_set(mas_detach, 1);
-	unmap_region(mm, mas_detach, vms->vma, vms->prev, vms->next,
-		     vms->start, vms->end, vms->vma_count, !vms->unlock);
+	vms_complete_pte_clear(vms, mas_detach, !vms->unlock);
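/*
 * Review note: unmap_region() needed vms->prev and vms->next here so that
 * free_pgtables() could widen the page-table free range to the
 * neighbouring VMAs; vms_complete_pte_clear() instead reads the equivalent
 * bounds from vms->unmap_start and vms->unmap_end, so the completion path
 * no longer dereferences the neighbours at all.
 */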
	/* Update high watermark before we lower total_vm */
	update_hiwater_vm(mm);
	/* Stat accounting */
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
	/* Paranoid bookkeeping */
	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
	VM_WARN_ON(vms->data_vm > mm->data_vm);

--- 55 unchanged lines hidden ---

			error = -EPERM;
			goto start_split_failed;
		}

		if (__split_vma(vms->vmi, vms->vma, vms->start, 1))
			goto start_split_failed;
	}
	vms->prev = vma_prev(vms->vmi);
+	if (vms->prev)
+		vms->unmap_start = vms->prev->vm_end;
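/*
 * Review note: the gather phase now records the lower free_pgtables()
 * bound while vms->prev is at hand. With no prev VMA, unmap_start
 * presumably keeps a lowest-address default from initialisation (in the
 * hidden lines); this is what the "start and end may be different"
 * comment in vms_complete_pte_clear() refers to.
 */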

	/*
	 * Detach a range of VMAs from the mm. Using next as a temp variable as
	 * it is always overwritten.
	 */
	for_each_vma_range(*(vms->vmi), next, vms->end) {
		long nrpages;


--- 44 unchanged lines hidden ---

		}
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
		BUG_ON(next->vm_start < vms->start);
		BUG_ON(next->vm_start > vms->end);
#endif
	}

	vms->next = vma_next(vms->vmi);
+	if (vms->next)
+		vms->unmap_end = vms->next->vm_start;
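/*
 * Review note: the matching upper bound; with no next VMA, unmap_end
 * likewise keeps its highest-address default from initialisation.
 */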

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	/* Make sure no VMAs are about to be lost. */
	{
		MA_STATE(test, mas_detach->tree, 0, 0);
		struct vm_area_struct *vma_mas, *vma_test;
		int test_count = 0;


--- 997 unchanged lines hidden ---
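/*
 * Review note: for orientation, a sketch of how the two phases these hunks
 * touch fit together in the munmap path. The caller is assumed to be
 * do_vmi_align_munmap(); names and argument lists are approximate and not
 * part of this diff:
 *
 *	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
 *	error = vms_gather_munmap_vmas(&vms, &mas_detach);
 *		(records vms->unmap_start/unmap_end from prev/next)
 *	... replace the range in the VMA maple tree ...
 *	vms_complete_munmap_vmas(&vms, &mas_detach);
 *		(now calls vms_complete_pte_clear() to unmap and free
 *		 page tables)
 */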