--- vma.c (7e7b2370ed0551d2fa8e4bd6e4bbd603fef3cdcd)
+++ vma.c (01cf21e9e119575a5a334ec19c8b2ef0c5d44c3c)
 // SPDX-License-Identifier: GPL-2.0-or-later

 /*
  * VMA-specific functions.
  */

 #include "vma_internal.h"
 #include "vma.h"
--- 671 unchanged lines hidden ---
 	mas_set(mas_detach, 0);
 	mas_for_each(mas_detach, vma, ULONG_MAX)
 		vma_mark_detached(vma, false);

 	__mt_destroy(mas_detach->tree);
 }

 /*
+ * vmi_complete_munmap_vmas() - Finish the munmap() operation
+ * @vmi: The vma iterator
+ * @vma: The first vma to be munmapped
+ * @mm: The mm struct
+ * @start: The start address
+ * @end: The end address
+ * @unlock: Unlock the mm or not
+ * @mas_detach: The maple state of the detached vma maple tree
+ * @locked_vm: The locked_vm count in the detached vmas
+ *
+ * This function updates the mm_struct, unmaps the region, frees the resources
+ * used for the munmap() and may downgrade the lock - if requested. Everything
+ * needed to be done once the vma maple tree is updated.
+ */
+static void
+vmi_complete_munmap_vmas(struct vma_iterator *vmi, struct vm_area_struct *vma,
+		struct mm_struct *mm, unsigned long start, unsigned long end,
+		bool unlock, struct ma_state *mas_detach,
+		unsigned long locked_vm)
+{
+	struct vm_area_struct *prev, *next;
+	int count;
+
+	count = mas_detach->index + 1;
+	mm->map_count -= count;
+	mm->locked_vm -= locked_vm;
+	if (unlock)
+		mmap_write_downgrade(mm);
+
+	prev = vma_iter_prev_range(vmi);
+	next = vma_next(vmi);
+	if (next)
+		vma_iter_prev_range(vmi);
+
+	/*
+	 * We can free page tables without write-locking mmap_lock because VMAs
+	 * were isolated before we downgraded mmap_lock.
+	 */
+	mas_set(mas_detach, 1);
+	unmap_region(mm, mas_detach, vma, prev, next, start, end, count,
+		     !unlock);
+	/* Statistics and freeing VMAs */
+	mas_set(mas_detach, 0);
+	remove_mt(mm, mas_detach);
+	validate_mm(mm);
+	if (unlock)
+		mmap_read_unlock(mm);
+
+	__mt_destroy(mas_detach->tree);
+}
+
+/*
  * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
  * @vmi: The vma iterator
  * @vma: The starting vm_area_struct
  * @mm: The mm_struct
  * @start: The aligned start address to munmap.
  * @end: The aligned end address to munmap.
  * @uf: The userfaultfd list_head
  * @unlock: Set to true to drop the mmap_lock. unlocking only happens on
  *          success.
  *
  * Return: 0 on success and drops the lock if so directed, error and leaves the
  * lock held otherwise.
  */
 int
 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		    struct mm_struct *mm, unsigned long start,
 		    unsigned long end, struct list_head *uf, bool unlock)
 {
-	struct vm_area_struct *prev, *next = NULL;
+	struct vm_area_struct *next = NULL;
 	struct maple_tree mt_detach;
 	int count = 0;
 	int error = -ENOMEM;
 	unsigned long locked_vm = 0;
 	MA_STATE(mas_detach, &mt_detach, 0, 0);
 	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
 	mt_on_stack(mt_detach);

--- 98 unchanged lines hidden ---
 	while (vma_iter_addr(vmi) > start)
 		vma_iter_prev_range(vmi);

 	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
 	if (error)
 		goto clear_tree_failed;

 	/* Point of no return */
-	mm->locked_vm -= locked_vm;
-	mm->map_count -= count;
-	if (unlock)
-		mmap_write_downgrade(mm);
+	vmi_complete_munmap_vmas(vmi, vma, mm, start, end, unlock, &mas_detach,
+				 locked_vm);

-	prev = vma_iter_prev_range(vmi);
-	next = vma_next(vmi);
-	if (next)
-		vma_iter_prev_range(vmi);
-
-	/*
-	 * We can free page tables without write-locking mmap_lock because VMAs
-	 * were isolated before we downgraded mmap_lock.
-	 */
-	mas_set(&mas_detach, 1);
-	unmap_region(mm, &mas_detach, vma, prev, next, start, end, count,
-		     !unlock);
-	/* Statistics and freeing VMAs */
-	mas_set(&mas_detach, 0);
-	remove_mt(mm, &mas_detach);
-	validate_mm(mm);
-	if (unlock)
-		mmap_read_unlock(mm);
-
-	__mt_destroy(&mt_detach);
 	return 0;

 modify_vma_failed:
 clear_tree_failed:
 userfaultfd_error:
 munmap_gather_failed:
 end_split_failed:
 	abort_munmap_vmas(&mas_detach);
--- 929 unchanged lines hidden ---
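Net effect of the diff: everything do_vmi_align_munmap() used to do after the "Point of no return" comment (the map_count/locked_vm accounting, the optional mmap_write_downgrade(), unmap_region(), remove_mt(), validate_mm(), the optional mmap_read_unlock(), and the destruction of the detached maple tree) now lives in the new vmi_complete_munmap_vmas() helper, and the caller shrinks to a single call. The error labels at the bottom of do_vmi_align_munmap() can only be reached before that point, which is why the helper can return void. As a rough illustration of the shape of this "gather, then complete" split, the standalone sketch below models it in plain userspace C; the names (demo_mm, gather_regions(), complete_unmap()) are hypothetical stand-ins, not kernel APIs.

/* Standalone sketch (not kernel code): an unmap-like operation split into a
 * fallible "gather/detach" phase and an infallible "complete" phase,
 * mirroring how do_vmi_align_munmap() now hands its tail work to
 * vmi_complete_munmap_vmas(). All names here are hypothetical.
 */
#include <stdio.h>

struct demo_mm {
	int map_count;		/* number of mapped regions */
	long locked_vm;		/* pages accounted as mlock()ed */
};

struct demo_detached {
	int count;		/* regions detached by the gather phase */
	long locked_vm;		/* locked pages found in those regions */
};

/* Phase 1: decide what to remove and detach it; global state is untouched,
 * so an error here is still fully recoverable (the abort_munmap_vmas() path).
 */
static int gather_regions(struct demo_detached *d, int nregions, long locked)
{
	d->count = nregions;
	d->locked_vm = locked;
	return 0;
}

/* Phase 2: the "point of no return" - update counters and free resources.
 * In the kernel version this is where unmap_region()/remove_mt() run.
 */
static void complete_unmap(struct demo_mm *mm, struct demo_detached *d)
{
	mm->map_count -= d->count;
	mm->locked_vm -= d->locked_vm;
}

int main(void)
{
	struct demo_mm mm = { .map_count = 5, .locked_vm = 32 };
	struct demo_detached d;

	if (gather_regions(&d, 2, 8))
		return 1;	/* abort path: nothing was committed yet */

	complete_unmap(&mm, &d);
	printf("map_count=%d locked_vm=%ld\n", mm.map_count, mm.locked_vm);
	return 0;
}

Keeping every fallible step in the gather phase is also what lets the completion phase downgrade mmap_lock partway through and still free page tables safely, as the in-code comment about VMAs being isolated before the downgrade notes.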