--- vma.c (01cf21e9e119575a5a334ec19c8b2ef0c5d44c3c)
+++ vma.c (6898c9039bc8e3027ae0fcd0f05fc2b82ccc8be0)
 // SPDX-License-Identifier: GPL-2.0-or-later

 /*
  * VMA-specific functions.
  */

 #include "vma_internal.h"
 #include "vma.h"

--- 723 unchanged lines hidden ---

         validate_mm(mm);
         if (unlock)
                 mmap_read_unlock(mm);

         __mt_destroy(mas_detach->tree);
 }

 /*
- * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
+ * vmi_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
+ * for removal at a later date. Handles splitting first and last if necessary
+ * and marking the vmas as isolated.
+ *
  * @vmi: The vma iterator
  * @vma: The starting vm_area_struct
  * @mm: The mm_struct
  * @start: The aligned start address to munmap.
  * @end: The aligned end address to munmap.
  * @uf: The userfaultfd list_head
- * @unlock: Set to true to drop the mmap_lock. unlocking only happens on
- *          success.
+ * @mas_detach: The maple state tracking the detached tree
+ * @locked_vm: a pointer to store the VM_LOCKED pages count.
  *
- * Return: 0 on success and drops the lock if so directed, error and leaves the
- * lock held otherwise.
+ * Return: 0 on success, -EPERM on mseal vmas, -ENOMEM otherwise
  */
-int
-do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+static int
+vmi_gather_munmap_vmas(struct vma_iterator *vmi, struct vm_area_struct *vma,
                 struct mm_struct *mm, unsigned long start,
-                unsigned long end, struct list_head *uf, bool unlock)
+                unsigned long end, struct list_head *uf,
+                struct ma_state *mas_detach, unsigned long *locked_vm)
 {
         struct vm_area_struct *next = NULL;
-        struct maple_tree mt_detach;
         int count = 0;
         int error = -ENOMEM;
-        unsigned long locked_vm = 0;
-        MA_STATE(mas_detach, &mt_detach, 0, 0);
-        mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
-        mt_on_stack(mt_detach);

         /*
          * If we need to split any vma, do it now to save pain later.
          *
          * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
          * unmapped vm_area_struct will remain in use: so lower split_vma
          * places tmp vma above, and higher split_vma places tmp vma below.
          */

--- 10 unchanged lines hidden ---

                         goto map_count_exceeded;

                 /* Don't bother splitting the VMA if we can't unmap it anyway */
                 if (!can_modify_vma(vma)) {
                         error = -EPERM;
                         goto start_split_failed;
                 }

-                error = __split_vma(vmi, vma, start, 1);
-                if (error)
+                if (__split_vma(vmi, vma, start, 1))
                         goto start_split_failed;
         }

         /*
          * Detach a range of VMAs from the mm. Using next as a temp variable as
          * it is always overwritten.
          */
         next = vma;
         do {
                 if (!can_modify_vma(next)) {
                         error = -EPERM;
                         goto modify_vma_failed;
                 }

                 /* Does it split the end? */
                 if (next->vm_end > end) {
-                        error = __split_vma(vmi, next, end, 0);
-                        if (error)
+                        if (__split_vma(vmi, next, end, 0))
                                 goto end_split_failed;
                 }
                 vma_start_write(next);
-                mas_set(&mas_detach, count);
-                error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
-                if (error)
+                mas_set(mas_detach, count++);
+                if (mas_store_gfp(mas_detach, next, GFP_KERNEL))
                         goto munmap_gather_failed;
+
                 vma_mark_detached(next, true);
                 if (next->vm_flags & VM_LOCKED)
-                        locked_vm += vma_pages(next);
+                        *locked_vm += vma_pages(next);

-                count++;
                 if (unlikely(uf)) {
                         /*
                          * If userfaultfd_unmap_prep returns an error the vmas
                          * will remain split, but userland will get a
                          * highly unexpected error anyway. This is no
                          * different than the case where the first of the two
                          * __split_vma fails, but we don't undo the first
                          * split, despite we could. This is unlikely enough
                          * failure that it's not worth optimizing it for.
                          */
-                        error = userfaultfd_unmap_prep(next, start, end, uf);
-
-                        if (error)
+                        if (userfaultfd_unmap_prep(next, start, end, uf))
                                 goto userfaultfd_error;
                 }
 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
                 BUG_ON(next->vm_start < start);
                 BUG_ON(next->vm_start > end);
 #endif
         } for_each_vma_range(*vmi, next, end);

 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
         /* Make sure no VMAs are about to be lost. */
         {
-                MA_STATE(test, &mt_detach, 0, 0);
+                MA_STATE(test, mas_detach->tree, 0, 0);
                 struct vm_area_struct *vma_mas, *vma_test;
                 int test_count = 0;

                 vma_iter_set(vmi, start);
                 rcu_read_lock();
                 vma_test = mas_find(&test, count - 1);
                 for_each_vma_range(*vmi, vma_mas, end) {
                         BUG_ON(vma_mas != vma_test);
                         test_count++;
                         vma_test = mas_next(&test, count - 1);
                 }
                 rcu_read_unlock();
                 BUG_ON(count != test_count);
         }
 #endif

         while (vma_iter_addr(vmi) > start)
                 vma_iter_prev_range(vmi);

+        return 0;
+
+userfaultfd_error:
+munmap_gather_failed:
+end_split_failed:
+modify_vma_failed:
+        abort_munmap_vmas(mas_detach);
+start_split_failed:
+map_count_exceeded:
+        return error;
+}
+
+/*
+ * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
+ * @vmi: The vma iterator
+ * @vma: The starting vm_area_struct
+ * @mm: The mm_struct
+ * @start: The aligned start address to munmap.
+ * @end: The aligned end address to munmap.
+ * @uf: The userfaultfd list_head
+ * @unlock: Set to true to drop the mmap_lock. unlocking only happens on
+ *          success.
+ *
+ * Return: 0 on success and drops the lock if so directed, error and leaves the
+ * lock held otherwise.
+ */
+int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+                struct mm_struct *mm, unsigned long start, unsigned long end,
+                struct list_head *uf, bool unlock)
+{
+        struct maple_tree mt_detach;
+        MA_STATE(mas_detach, &mt_detach, 0, 0);
+        mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
+        mt_on_stack(mt_detach);
+        int error;
+        unsigned long locked_vm = 0;
+
+        error = vmi_gather_munmap_vmas(vmi, vma, mm, start, end, uf,
+                                       &mas_detach, &locked_vm);
+        if (error)
+                goto gather_failed;
+
         error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
         if (error)
                 goto clear_tree_failed;

         /* Point of no return */
         vmi_complete_munmap_vmas(vmi, vma, mm, start, end, unlock, &mas_detach,
                         locked_vm);
-
         return 0;

-modify_vma_failed:
 clear_tree_failed:
-userfaultfd_error:
-munmap_gather_failed:
-end_split_failed:
         abort_munmap_vmas(&mas_detach);
-start_split_failed:
-map_count_exceeded:
+gather_failed:
         validate_mm(mm);
         return error;
 }

 /*
  * do_vmi_munmap() - munmap a given range.
  * @vmi: The vma iterator
  * @mm: The mm_struct

--- 919 unchanged lines hidden ---
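For orientation, below is a condensed restatement of the two-phase flow the new side introduces, written as if it lived in vma.c next to the functions above. It is a sketch only: the function name is hypothetical, the explanatory comments are added here, every helper it calls appears in the diff above, and the error labels of the real do_vmi_align_munmap() are collapsed into direct returns.

/*
 * Sketch only (hypothetical wrapper name): mirrors the gather/complete split
 * shown in the new version of do_vmi_align_munmap() above.
 */
static int munmap_two_phase_sketch(struct vma_iterator *vmi,
                struct vm_area_struct *vma, struct mm_struct *mm,
                unsigned long start, unsigned long end,
                struct list_head *uf, bool unlock)
{
        struct maple_tree mt_detach;
        MA_STATE(mas_detach, &mt_detach, 0, 0);
        unsigned long locked_vm = 0;
        int error;

        /* Private on-stack tree that temporarily holds the detached VMAs. */
        mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
        mt_on_stack(mt_detach);

        /*
         * Phase 1: split the boundary VMAs if needed, write-lock and mark
         * each VMA in [start, end) detached, and store it in mas_detach.
         * locked_vm accumulates the VM_LOCKED page count for later accounting.
         * On failure the gather helper has already called abort_munmap_vmas()
         * on its own partial work (see its error labels above).
         */
        error = vmi_gather_munmap_vmas(vmi, vma, mm, start, end, uf,
                                       &mas_detach, &locked_vm);
        if (error)
                goto out;

        /* Drop the unmapped range from the mm's VMA tree. */
        error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
        if (error) {
                abort_munmap_vmas(&mas_detach);
                goto out;
        }

        /*
         * Phase 2 (point of no return): complete the munmap using the
         * detached VMAs, and drop mmap_lock if @unlock was set.
         */
        vmi_complete_munmap_vmas(vmi, vma, mm, start, end, unlock,
                                 &mas_detach, locked_vm);
        return 0;

out:
        validate_mm(mm);
        return error;
}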