// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * VMA-specific functions.
 */

#include "vma_internal.h"
#include "vma.h"

static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
{
	struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;
	/*
	 * If the vma has a ->close operation then the driver probably needs to
	 * release per-vma resources, so we don't attempt to merge those if the
	 * caller indicates the current vma may be removed as part of the merge,
	 * which is the case if we are attempting to merge the next VMA into
	 * this one.
	 */
	bool may_remove_vma = merge_next;

	if (!mpol_equal(vmg->policy, vma_policy(vma)))
		return false;
	/*
	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
	 * match the flags but dirty bit -- the caller should mark
	 * merged VMA as dirty. If dirty bit won't be excluded from
	 * comparison, we increase pressure on the memory system forcing
	 * the kernel to generate new VMAs when old one could be
	 * extended instead.
	 */
	if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY)
		return false;
	if (vma->vm_file != vmg->file)
		return false;
	if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
		return false;
	if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
		return false;
	if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
		return false;
	return true;
}

static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
		 struct anon_vma *anon_vma2, struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMA cloned from
	 * parents. This can improve scalability caused by anon_vma lock.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
		list_is_singular(&vma->anon_vma_chain)))
		return true;
	return anon_vma1 == anon_vma2;
}

/* Are the anon_vma's belonging to each VMA compatible with one another? */
static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1,
					    struct vm_area_struct *vma2)
{
	return is_mergeable_anon_vma(vma1->anon_vma, vma2->anon_vma, NULL);
}

/*
 * init_multi_vma_prep() - Initializer for struct vma_prepare
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 * @next: The next vma if it is to be adjusted
 * @remove: The first vma to be removed
 * @remove2: The second vma to be removed
 */
static void init_multi_vma_prep(struct vma_prepare *vp,
				struct vm_area_struct *vma,
				struct vm_area_struct *next,
				struct vm_area_struct *remove,
				struct vm_area_struct *remove2)
{
	memset(vp, 0, sizeof(struct vma_prepare));
	vp->vma = vma;
	vp->anon_vma = vma->anon_vma;
	vp->remove = remove;
	vp->remove2 = remove2;
	vp->adj_next = next;
	if (!vp->anon_vma && next)
		vp->anon_vma = next->anon_vma;

	vp->file = vma->vm_file;
	if (vp->file)
		vp->mapping = vma->vm_file->f_mapping;

}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 *
 * We assume the vma may be removed as part of the merge.
 */
bool
can_vma_merge_before(struct vma_merge_struct *vmg)
{
	pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);

	if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
	    is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) {
		if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
			return true;
	}

	return false;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We assume that vma is not removed as part of the merge.
 */
bool can_vma_merge_after(struct vma_merge_struct *vmg)
{
	if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
	    is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) {
		if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
			return true;
	}
	return false;
}

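/*
 * Worked example of the offset checks above (illustrative figures only,
 * assuming 4KiB pages): suppose prev maps file pages [4, 8) at virtual range
 * [0x1000, 0x5000), so prev->vm_pgoff == 4 and vma_pages(prev) == 4, and the
 * proposed range is [0x5000, 0x7000) with vmg->pgoff == 8.  Then
 * prev->vm_pgoff + vma_pages(prev) == 8 == vmg->pgoff, so can_vma_merge_after()
 * can succeed.  Symmetrically, can_vma_merge_before() requires
 * next->vm_pgoff == vmg->pgoff + PHYS_PFN(vmg->end - vmg->start), i.e. the
 * proposed range must end exactly where next's file mapping begins.
 */
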
/*
 * Can the proposed VMA be merged with the left (previous) VMA taking into
 * account the start position of the proposed range.
 */
static bool can_vma_merge_left(struct vma_merge_struct *vmg)
{
	return vmg->prev && vmg->prev->vm_end == vmg->start &&
		can_vma_merge_after(vmg);
}

/*
 * Can the proposed VMA be merged with the right (next) VMA taking into
 * account the end position of the proposed range.
 *
 * In addition, if we can merge with the left VMA, ensure that left and right
 * anon_vma's are also compatible.
 */
static bool can_vma_merge_right(struct vma_merge_struct *vmg,
				bool can_merge_left)
{
	if (!vmg->next || vmg->end != vmg->next->vm_start ||
	    !can_vma_merge_before(vmg))
		return false;

	if (!can_merge_left)
		return true;

	/*
	 * If we can merge with prev (left) and next (right), indicating that
	 * each VMA's anon_vma is compatible with the proposed anon_vma, this
	 * does not mean prev and next are compatible with EACH OTHER.
	 *
	 * We therefore check this in addition to mergeability to either side.
	 */
	return are_anon_vmas_compatible(vmg->prev, vmg->next);
}

/*
 * Close a vm structure and free it.
 */
void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed)
{
	might_sleep();
	if (!closed && vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	if (unreachable)
		__vm_area_free(vma);
	else
		vm_area_free(vma);
}

/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
		   /* mm_wr_locked = */ true);
	mas_set(mas, vma->vm_end);
	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
		      next ? next->vm_start : USER_PGTABLES_CEILING,
		      /* mm_wr_locked = */ true);
	tlb_finish_mmu(&tlb);
}

/*
 * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
 * has already been checked or doesn't make sense to fail.
 * VMA Iterator will point to the original VMA.
 */
static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
		       unsigned long addr, int new_below)
{
	struct vma_prepare vp;
	struct vm_area_struct *new;
	int err;

	WARN_ON(vma->vm_start >= addr);
	WARN_ON(vma->vm_end <= addr);

	if (vma->vm_ops && vma->vm_ops->may_split) {
		err = vma->vm_ops->may_split(vma, addr);
		if (err)
			return err;
	}

	new = vm_area_dup(vma);
	if (!new)
		return -ENOMEM;

	if (new_below) {
		new->vm_end = addr;
	} else {
		new->vm_start = addr;
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	err = -ENOMEM;
	vma_iter_config(vmi, new->vm_start, new->vm_end);
	if (vma_iter_prealloc(vmi, new))
		goto out_free_vma;

	err = vma_dup_policy(vma, new);
	if (err)
		goto out_free_vmi;

	err = anon_vma_clone(new, vma);
	if (err)
		goto out_free_mpol;

	if (new->vm_file)
		get_file(new->vm_file);

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	vma_start_write(vma);
	vma_start_write(new);

	init_vma_prep(&vp, vma);
	vp.insert = new;
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);

	if (new_below) {
		vma->vm_start = addr;
		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
	} else {
		vma->vm_end = addr;
	}

	/* vma_complete stores the new vma */
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);

	/* Success. */
	if (new_below)
		vma_next(vmi);
	else
		vma_prev(vmi);

	return 0;

out_free_mpol:
	mpol_put(vma_policy(new));
out_free_vmi:
	vma_iter_free(vmi);
out_free_vma:
	vm_area_free(new);
	return err;
}

/*
 * Split a vma into two pieces at address 'addr', a new vma is allocated
 * either for the first part or the tail.
 */
static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
		     unsigned long addr, int new_below)
{
	if (vma->vm_mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	return __split_vma(vmi, vma, addr, new_below);
}

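/*
 * Worked example for the split helpers above (illustrative only, assuming
 * 4KiB pages): splitting a VMA spanning [0x1000, 0x5000) with vm_pgoff == 0
 * at addr == 0x3000 with new_below == 0 leaves the original VMA covering
 * [0x1000, 0x3000) and creates a new VMA for [0x3000, 0x5000) whose vm_pgoff
 * becomes 2, i.e. advanced by (addr - vm_start) >> PAGE_SHIFT.  With
 * new_below == 1 the new VMA instead covers [0x1000, 0x3000) and keeps
 * vm_pgoff == 0, while the original VMA is moved up to start at 0x3000 with
 * its vm_pgoff advanced by 2.
 */
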
/*
 * init_vma_prep() - Initializer wrapper for vma_prepare struct
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 */
void init_vma_prep(struct vma_prepare *vp,
		   struct vm_area_struct *vma)
{
	init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
}

/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
				      struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_unmap_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_lock and by
 * the root anon_vma's mutex.
 */
void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}

static void __vma_link_file(struct vm_area_struct *vma,
			    struct address_space *mapping)
{
	if (vma_is_shared_maywrite(vma))
		mapping_allow_writable(mapping);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_insert(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * vma_prepare() - Helper function for handling locking VMAs prior to altering
 * @vp: The initialized vma_prepare struct
 */
void vma_prepare(struct vma_prepare *vp)
{
	if (vp->file) {
		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);

		if (vp->adj_next)
			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
				      vp->adj_next->vm_end);

		i_mmap_lock_write(vp->mapping);
		if (vp->insert && vp->insert->vm_file) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(vp->insert,
					vp->insert->vm_file->f_mapping);
		}
	}

	if (vp->anon_vma) {
		anon_vma_lock_write(vp->anon_vma);
		anon_vma_interval_tree_pre_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
	}

	if (vp->file) {
		flush_dcache_mmap_lock(vp->mapping);
		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
		if (vp->adj_next)
			vma_interval_tree_remove(vp->adj_next,
						 &vp->mapping->i_mmap);
	}

}

/*
 * dup_anon_vma() - Helper function to duplicate anon_vma
 * @dst: The destination VMA
 * @src: The source VMA
 * @dup: Pointer to the destination VMA when successful.
 *
 * Returns: 0 on success.
 */
static int dup_anon_vma(struct vm_area_struct *dst,
		struct vm_area_struct *src, struct vm_area_struct **dup)
{
	/*
	 * Easily overlooked: when mprotect shifts the boundary, make sure the
	 * expanding vma has anon_vma set if the shrinking vma had, to cover any
	 * anon pages imported.
	 */
	if (src->anon_vma && !dst->anon_vma) {
		int ret;

		vma_assert_write_locked(dst);
		dst->anon_vma = src->anon_vma;
		ret = anon_vma_clone(dst, src);
		if (ret)
			return ret;

		*dup = dst;
	}

	return 0;
}

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mt_validate(&mm->mm_mt);
	for_each_vma(vmi, vma) {
#ifdef CONFIG_DEBUG_VM_RB
		struct anon_vma *anon_vma = vma->anon_vma;
		struct anon_vma_chain *avc;
#endif
		unsigned long vmi_start, vmi_end;
		bool warn = 0;

		vmi_start = vma_iter_addr(&vmi);
		vmi_end = vma_iter_end(&vmi);
		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
			warn = 1;

		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
			warn = 1;

		if (warn) {
			pr_emerg("issue in %s\n", current->comm);
			dump_stack();
			dump_vma(vma);
			pr_emerg("tree range: %px start %lx end %lx\n", vma,
				 vmi_start, vmi_end - 1);
			vma_iter_dump_tree(&vmi);
		}

#ifdef CONFIG_DEBUG_VM_RB
		if (anon_vma) {
			anon_vma_lock_read(anon_vma);
			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
				anon_vma_interval_tree_verify(avc);
			anon_vma_unlock_read(anon_vma);
		}
#endif
		i++;
	}
	if (i != mm->map_count) {
		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */

/*
 * vma_merge_new_range - Attempt to merge a new VMA into address space
 *
 * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
 *       (exclusive), which we try to merge with any adjacent VMAs if possible.
 *
 * We are about to add a VMA to the address space starting at @vmg->start and
 * ending at @vmg->end. There are three different possible scenarios:
 *
 * 1. There is a VMA with identical properties immediately adjacent to the
 *    proposed new VMA [@vmg->start, @vmg->end) either before or after it -
 *    EXPAND that VMA:
 *
 * Proposed:       |-----|  or  |-----|
 * Existing:  |----|                  |----|
 *
 * 2. There are VMAs with identical properties immediately adjacent to the
 *    proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
 *    EXPAND the former and REMOVE the latter:
 *
 * Proposed:       |-----|
 * Existing:  |----|     |----|
 *
 * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
 *    VMAs do not have identical attributes - NO MERGE POSSIBLE.
 *
 * In instances where we can merge, this function returns the expanded VMA which
 * will have its range adjusted accordingly and the underlying maple tree also
 * adjusted.
 *
 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
 *          to the VMA we expanded.
 *
 * This function adjusts @vmg to provide @vmg->next if not already specified,
 * and adjusts [@vmg->start, @vmg->end) to span the expanded range.
 *
 * ASSUMPTIONS:
 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
 * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
 *   other than VMAs that will be unmapped should the operation succeed.
 * - The caller must have specified the previous vma in @vmg->prev.
 * - The caller must have specified the next vma in @vmg->next.
 * - The caller must have positioned the vmi at or before the gap.
 */
struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *prev = vmg->prev;
	struct vm_area_struct *next = vmg->next;
	unsigned long start = vmg->start;
	unsigned long end = vmg->end;
	pgoff_t pgoff = vmg->pgoff;
	pgoff_t pglen = PHYS_PFN(end - start);
	bool can_merge_left, can_merge_right;

	mmap_assert_write_locked(vmg->mm);
	VM_WARN_ON(vmg->vma);
	/* vmi must point at or before the gap. */
	VM_WARN_ON(vma_iter_addr(vmg->vmi) > end);

	vmg->state = VMA_MERGE_NOMERGE;

	/* Special VMAs are unmergeable, also if no prev/next. */
	if ((vmg->flags & VM_SPECIAL) || (!prev && !next))
		return NULL;

	can_merge_left = can_vma_merge_left(vmg);
	can_merge_right = can_vma_merge_right(vmg, can_merge_left);

	/* If we can merge with the next VMA, adjust vmg accordingly. */
	if (can_merge_right) {
		vmg->end = next->vm_end;
		vmg->vma = next;
		vmg->pgoff = next->vm_pgoff - pglen;
	}

	/* If we can merge with the previous VMA, adjust vmg accordingly. */
	if (can_merge_left) {
		vmg->start = prev->vm_start;
		vmg->vma = prev;
		vmg->pgoff = prev->vm_pgoff;

		vma_prev(vmg->vmi); /* Equivalent to going to the previous range */
	}

	/*
	 * Now try to expand adjacent VMA(s). This takes care of removing the
	 * following VMA if we have VMAs on both sides.
	 */
	if (vmg->vma && !vma_expand(vmg)) {
		khugepaged_enter_vma(vmg->vma, vmg->flags);
		vmg->state = VMA_MERGE_SUCCESS;
		return vmg->vma;
	}

	/* If expansion failed, reset state. Allows us to retry merge later. */
	vmg->vma = NULL;
	vmg->start = start;
	vmg->end = end;
	vmg->pgoff = pgoff;
	if (vmg->vma == prev)
		vma_iter_set(vmg->vmi, start);

	return NULL;
}
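
/*
 * Illustrative caller sketch for vma_merge_new_range() (hedged: this shows
 * only the fields the checks in this file rely on; real callers also populate
 * file, policy, uffd_ctx and anon_name as appropriate and position the
 * iterator per the ASSUMPTIONS above):
 *
 *	struct vma_merge_struct vmg = {
 *		.mm    = mm,
 *		.vmi   = &vmi,
 *		.start = addr,
 *		.end   = addr + len,
 *		.flags = vm_flags,
 *		.pgoff = pgoff,
 *		.prev  = prev,	// VMA immediately preceding the gap, or NULL
 *		.next  = next,	// VMA immediately following the gap, or NULL
 *	};
 *	struct vm_area_struct *vma = vma_merge_new_range(&vmg);
 *
 *	if (!vma) {
 *		// No merge was possible: allocate and insert a fresh VMA
 *		// covering [addr, addr + len) instead.
 *	}
 */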

/*
 * vma_expand - Expand an existing VMA
 *
 * @vmg: Describes a VMA expansion operation.
 *
 * Expand @vma to vmg->start and vmg->end.  Can expand off the start and end.
 * Will expand over vmg->next if it's different from vmg->vma and vmg->end ==
 * vmg->next->vm_end.  Checking if the vmg->vma can expand and merge with
 * vmg->next needs to be handled by the caller.
 *
 * Returns: 0 on success.
 *
 * ASSUMPTIONS:
 * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock.
 * - The caller must have set @vmg->vma and @vmg->next.
 */
int vma_expand(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *anon_dup = NULL;
	bool remove_next = false;
	struct vm_area_struct *vma = vmg->vma;
	struct vm_area_struct *next = vmg->next;
	struct vma_prepare vp;

	mmap_assert_write_locked(vmg->mm);

	vma_start_write(vma);
	if (next && (vma != next) && (vmg->end == next->vm_end)) {
		int ret;

		remove_next = true;
		vma_start_write(next);
		ret = dup_anon_vma(vma, next, &anon_dup);
		if (ret)
			return ret;
	}

	init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
	/* Not merging but overwriting any part of next is not handled. */
	VM_WARN_ON(next && !vp.remove &&
		   next != vma && vmg->end > next->vm_start);
	/* Only handles expanding */
	VM_WARN_ON(vma->vm_start < vmg->start || vma->vm_end > vmg->end);

	/* Note: vma iterator must be pointing to 'start' */
	vma_iter_config(vmg->vmi, vmg->start, vmg->end);
	if (vma_iter_prealloc(vmg->vmi, vma))
		goto nomem;

	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, vmg->start, vmg->end, 0);
	vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff);
	vma_iter_store(vmg->vmi, vma);

	vma_complete(&vp, vmg->vmi, vma->vm_mm);
	return 0;

nomem:
	vmg->state = VMA_MERGE_ERROR_NOMEM;
	if (anon_dup)
		unlink_anon_vmas(anon_dup);
	return -ENOMEM;
}

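/*
 * Concrete illustration of the remove_next case in vma_expand() above
 * (hypothetical addresses): if vmg->vma spans [0x1000, 0x3000) and vmg->next
 * spans [0x3000, 0x5000), then expanding with vmg->start == 0x1000 and
 * vmg->end == 0x5000 satisfies vmg->end == next->vm_end, so next is recorded
 * in the vma_prepare state as the VMA to remove (with its anon_vma duplicated
 * into vma via dup_anon_vma()) and vmg->vma ends up covering all of
 * [0x1000, 0x5000).  If vmg->end stopped short of next->vm_end the caller
 * would be overwriting only part of next, which the first VM_WARN_ON() in
 * vma_expand() flags as unhandled.
 */
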
/*
 * vma_shrink() - Reduce an existing VMA's memory area
 * @vmi: The vma iterator
 * @vma: The VMA to modify
 * @start: The new start
 * @end: The new end
 * @pgoff: The new page offset (vm_pgoff) corresponding to @start
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       unsigned long start, unsigned long end, pgoff_t pgoff)
{
	struct vma_prepare vp;

	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));

	if (vma->vm_start < start)
		vma_iter_config(vmi, vma->vm_start, start);
	else
		vma_iter_config(vmi, end, vma->vm_end);

	if (vma_iter_prealloc(vmi, NULL))
		return -ENOMEM;

	vma_start_write(vma);

	init_vma_prep(&vp, vma);
	vma_prepare(&vp);
	vma_adjust_trans_huge(vma, start, end, 0);

	vma_iter_clear(vmi);
	vma_set_range(vma, start, end, pgoff);
	vma_complete(&vp, vmi, vma->vm_mm);
	validate_mm(vma->vm_mm);
	return 0;
}

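/*
 * Worked example for vma_shrink() (illustrative only, assuming 4KiB pages):
 * to shrink a VMA currently spanning [0x1000, 0x9000) down to
 * [0x1000, 0x5000), pass start == 0x1000, end == 0x5000 and the VMA's
 * existing vm_pgoff, since the start does not move.  If instead the front is
 * trimmed, e.g. to [0x5000, 0x9000), the caller must pass a pgoff increased
 * by (new_start - old_start) >> PAGE_SHIFT so the file offset still matches
 * the first remaining page.
 */
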
/*
 * vma_complete() - Helper function for handling the unlocking after altering
 * VMAs, or for inserting a VMA.
 *
 * @vp: The vma_prepare struct
 * @vmi: The vma iterator
 * @mm: The mm_struct
 */
void vma_complete(struct vma_prepare *vp,
		  struct vma_iterator *vmi, struct mm_struct *mm)
{
	if (vp->file) {
		if (vp->adj_next)
			vma_interval_tree_insert(vp->adj_next,
						 &vp->mapping->i_mmap);
		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
		flush_dcache_mmap_unlock(vp->mapping);
	}

	if (vp->remove && vp->file) {
		__remove_shared_vm_struct(vp->remove, vp->mapping);
		if (vp->remove2)
			__remove_shared_vm_struct(vp->remove2, vp->mapping);
	} else if (vp->insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		vma_iter_store(vmi, vp->insert);
		mm->map_count++;
	}

	if (vp->anon_vma) {
		anon_vma_interval_tree_post_update_vma(vp->vma);
		if (vp->adj_next)
			anon_vma_interval_tree_post_update_vma(vp->adj_next);
		anon_vma_unlock_write(vp->anon_vma);
	}

	if (vp->file) {
		i_mmap_unlock_write(vp->mapping);
		uprobe_mmap(vp->vma);

		if (vp->adj_next)
			uprobe_mmap(vp->adj_next);
	}

	if (vp->remove) {
again:
		vma_mark_detached(vp->remove, true);
		if (vp->file) {
			uprobe_munmap(vp->remove, vp->remove->vm_start,
				      vp->remove->vm_end);
			fput(vp->file);
		}
		if (vp->remove->anon_vma)
			anon_vma_merge(vp->vma, vp->remove);
		mm->map_count--;
		mpol_put(vma_policy(vp->remove));
		if (!vp->remove2)
			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
		vm_area_free(vp->remove);

		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we are removing both mid and next vmas
		 */
		if (vp->remove2) {
			vp->remove = vp->remove2;
			vp->remove2 = NULL;
			goto again;
		}
	}
	if (vp->insert && vp->file)
		uprobe_mmap(vp->insert);
}

static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach, bool mm_wr_locked)
{
	struct mmu_gather tlb;

	if (!vms->clear_ptes) /* Nothing to do */
		return;

	/*
	 * We can free page tables without write-locking mmap_lock because VMAs
	 * were isolated before we downgraded mmap_lock.
	 */
	mas_set(mas_detach, 1);
	lru_add_drain();
	tlb_gather_mmu(&tlb, vms->vma->vm_mm);
	update_hiwater_rss(vms->vma->vm_mm);
	unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
		   vms->vma_count, mm_wr_locked);

	mas_set(mas_detach, 1);
	/* start and end may be different if there is no prev or next vma. */
	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
		      vms->unmap_end, mm_wr_locked);
	tlb_finish_mmu(&tlb);
	vms->clear_ptes = false;
}

void vms_clean_up_area(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;

	if (!vms->nr_pages)
		return;

	vms_clear_ptes(vms, mas_detach, true);
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		if (vma->vm_ops && vma->vm_ops->close)
			vma->vm_ops->close(vma);
	vms->closed_vm_ops = true;
}

/*
 * vms_complete_munmap_vmas() - Finish the munmap() operation
 * @vms: The vma munmap struct
 * @mas_detach: The maple state of the detached vmas
 *
 * This updates the mm_struct, unmaps the region, frees the resources
 * used for the munmap() and may downgrade the lock - if requested.  Everything
 * needed to be done once the vma maple tree is updated.
 */
void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	mm = current->mm;
	mm->map_count -= vms->vma_count;
	mm->locked_vm -= vms->locked_vm;
	if (vms->unlock)
		mmap_write_downgrade(mm);

	if (!vms->nr_pages)
		return;

	vms_clear_ptes(vms, mas_detach, !vms->unlock);
	/* Update high watermark before we lower total_vm */
	update_hiwater_vm(mm);
	/* Stat accounting */
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
	/* Paranoid bookkeeping */
	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
	VM_WARN_ON(vms->data_vm > mm->data_vm);
	mm->exec_vm -= vms->exec_vm;
	mm->stack_vm -= vms->stack_vm;
	mm->data_vm -= vms->data_vm;

	/* Remove and clean up vmas */
	mas_set(mas_detach, 0);
	mas_for_each(mas_detach, vma, ULONG_MAX)
		remove_vma(vma, /* unreachable = */ false, vms->closed_vm_ops);

	vm_unacct_memory(vms->nr_accounted);
	validate_mm(mm);
	if (vms->unlock)
		mmap_read_unlock(mm);

	__mt_destroy(mas_detach->tree);
}

/*
 * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
 * for removal at a later date.  Handles splitting first and last if necessary
 * and marking the vmas as isolated.
 *
 * @vms: The vma munmap struct
 * @mas_detach: The maple state tracking the detached tree
 *
 * Return: 0 on success, -EPERM on mseal vmas, -ENOMEM otherwise
 */
int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach)
{
	struct vm_area_struct *next = NULL;
	int error = -ENOMEM;

	/*
	 * If we need to split any vma, do it now to save pain later.
	 * Does it split the first one?
	 */
	if (vms->start > vms->vma->vm_start) {

		/*
		 * Make sure that map_count on return from munmap() will
		 * not exceed its limit; but let map_count go just above
		 * its limit temporarily, to help free resources as expected.
		 */
		if (vms->end < vms->vma->vm_end &&
		    vms->vma->vm_mm->map_count >= sysctl_max_map_count)
			goto map_count_exceeded;

		/* Don't bother splitting the VMA if we can't unmap it anyway */
		if (!can_modify_vma(vms->vma)) {
			error = -EPERM;
			goto start_split_failed;
		}

		if (__split_vma(vms->vmi, vms->vma, vms->start, 1))
			goto start_split_failed;
	}
	vms->prev = vma_prev(vms->vmi);
	if (vms->prev)
		vms->unmap_start = vms->prev->vm_end;

	/*
	 * Detach a range of VMAs from the mm. Using next as a temp variable as
	 * it is always overwritten.
	 */
	for_each_vma_range(*(vms->vmi), next, vms->end) {
		long nrpages;

		if (!can_modify_vma(next)) {
			error = -EPERM;
			goto modify_vma_failed;
		}
		/* Does it split the end? */
		if (next->vm_end > vms->end) {
			if (__split_vma(vms->vmi, next, vms->end, 0))
				goto end_split_failed;
		}
		vma_start_write(next);
		mas_set(mas_detach, vms->vma_count++);
		if (mas_store_gfp(mas_detach, next, GFP_KERNEL))
			goto munmap_gather_failed;

		vma_mark_detached(next, true);
		nrpages = vma_pages(next);

		vms->nr_pages += nrpages;
		if (next->vm_flags & VM_LOCKED)
			vms->locked_vm += nrpages;

		if (next->vm_flags & VM_ACCOUNT)
			vms->nr_accounted += nrpages;

		if (is_exec_mapping(next->vm_flags))
			vms->exec_vm += nrpages;
		else if (is_stack_mapping(next->vm_flags))
			vms->stack_vm += nrpages;
		else if (is_data_mapping(next->vm_flags))
			vms->data_vm += nrpages;

		if (unlikely(vms->uf)) {
			/*
			 * If userfaultfd_unmap_prep returns an error the vmas
			 * will remain split, but userland will get a
			 * highly unexpected error anyway. This is no
			 * different than the case where the first of the two
			 * __split_vma fails, but we don't undo the first
			 * split, despite we could. This is unlikely enough
			 * failure that it's not worth optimizing it for.
			 */
			if (userfaultfd_unmap_prep(next, vms->start, vms->end,
						   vms->uf))
				goto userfaultfd_error;
		}
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
		BUG_ON(next->vm_start < vms->start);
		BUG_ON(next->vm_start > vms->end);
#endif
	}

	vms->next = vma_next(vms->vmi);
	if (vms->next)
		vms->unmap_end = vms->next->vm_start;

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	/* Make sure no VMAs are about to be lost. */
	{
		MA_STATE(test, mas_detach->tree, 0, 0);
		struct vm_area_struct *vma_mas, *vma_test;
		int test_count = 0;

		vma_iter_set(vms->vmi, vms->start);
		rcu_read_lock();
		vma_test = mas_find(&test, vms->vma_count - 1);
		for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
			BUG_ON(vma_mas != vma_test);
			test_count++;
			vma_test = mas_next(&test, vms->vma_count - 1);
		}
		rcu_read_unlock();
		BUG_ON(vms->vma_count != test_count);
	}
#endif

	while (vma_iter_addr(vms->vmi) > vms->start)
		vma_iter_prev_range(vms->vmi);

	vms->clear_ptes = true;
	return 0;

userfaultfd_error:
munmap_gather_failed:
end_split_failed:
modify_vma_failed:
	reattach_vmas(mas_detach);
start_split_failed:
map_count_exceeded:
	return error;
}

/*
 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
 * @vmi: The vma iterator
 * @vma: The starting vm_area_struct
 * @mm: The mm_struct
 * @start: The aligned start address to munmap.
 * @end: The aligned end address to munmap.
 * @uf: The userfaultfd list_head
 * @unlock: Set to true to drop the mmap_lock.  unlocking only happens on
 *          success.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 *         lock held otherwise.
 */
int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		struct mm_struct *mm, unsigned long start, unsigned long end,
		struct list_head *uf, bool unlock)
{
	struct maple_tree mt_detach;
	MA_STATE(mas_detach, &mt_detach, 0, 0);
	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
	mt_on_stack(mt_detach);
	struct vma_munmap_struct vms;
	int error;

	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
	error = vms_gather_munmap_vmas(&vms, &mas_detach);
	if (error)
		goto gather_failed;

	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
	if (error)
		goto clear_tree_failed;

	/* Point of no return */
	vms_complete_munmap_vmas(&vms, &mas_detach);
	return 0;

clear_tree_failed:
	reattach_vmas(&mas_detach);
gather_failed:
	validate_mm(mm);
	return error;
}

/*
 * do_vmi_munmap() - munmap a given range.
 * @vmi: The vma iterator
 * @mm: The mm_struct
 * @start: The start address to munmap
 * @len: The length of the range to munmap
 * @uf: The userfaultfd list_head
 * @unlock: set to true if the user wants to drop the mmap_lock on success
 *
 * This function takes a @mas that is either pointing to the previous VMA or set
 * to MA_START and sets it up to remove the mapping(s).  The @len will be
 * aligned.
 *
 * Return: 0 on success and drops the lock if so directed, error and leaves the
 * lock held otherwise.
 */
int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock)
{
	unsigned long end;
	struct vm_area_struct *vma;

	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
		return -EINVAL;

	end = start + PAGE_ALIGN(len);
	if (end == start)
		return -EINVAL;

	/* Find the first overlapping VMA */
	vma = vma_find(vmi, end);
	if (!vma) {
		if (unlock)
			mmap_write_unlock(mm);
		return 0;
	}

	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
}

112849b1b8d6SLorenzo Stoakes  * 112949b1b8d6SLorenzo Stoakes  * The following mprotect cases have to be considered, where **** is 113049b1b8d6SLorenzo Stoakes  * the area passed down from mprotect_fixup, never extending beyond one 113149b1b8d6SLorenzo Stoakes  * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts 113249b1b8d6SLorenzo Stoakes  * at the same address as **** and is of the same or larger span, and 113349b1b8d6SLorenzo Stoakes  * NNNN the next vma after ****: 113449b1b8d6SLorenzo Stoakes  * 113549b1b8d6SLorenzo Stoakes  * **** **** **** 113649b1b8d6SLorenzo Stoakes  * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPCCCCCC 113749b1b8d6SLorenzo Stoakes  * cannot merge might become might become 113849b1b8d6SLorenzo Stoakes  * PPNNNNNNNNNN PPPPPPPPPPCC 113949b1b8d6SLorenzo Stoakes  * mmap, brk or case 4 below case 5 below 114049b1b8d6SLorenzo Stoakes  * mremap move: 114149b1b8d6SLorenzo Stoakes  * **** **** 114249b1b8d6SLorenzo Stoakes  * PPPP NNNN PPPPCCCCNNNN 114349b1b8d6SLorenzo Stoakes  * might become might become 114449b1b8d6SLorenzo Stoakes  * PPPPPPPPPPPP 1 or PPPPPPPPPPPP 6 or 114549b1b8d6SLorenzo Stoakes  * PPPPPPPPNNNN 2 or PPPPPPPPNNNN 7 or 114649b1b8d6SLorenzo Stoakes  * PPPPNNNNNNNN 3 PPPPNNNNNNNN 8 114749b1b8d6SLorenzo Stoakes  * 114849b1b8d6SLorenzo Stoakes  * It is important for case 8 that the vma CCCC overlapping the 114949b1b8d6SLorenzo Stoakes  * region **** is never going to be extended over NNNN. Instead NNNN must 115049b1b8d6SLorenzo Stoakes  * be extended in region **** and CCCC must be removed. This way in 115149b1b8d6SLorenzo Stoakes  * all cases where vma_merge succeeds, the moment vma_merge drops the 115249b1b8d6SLorenzo Stoakes  * rmap_locks, the properties of the merged vma will be already 115349b1b8d6SLorenzo Stoakes  * correct for the whole merged range. Some of those properties like 115449b1b8d6SLorenzo Stoakes  * vm_page_prot/vm_flags may be accessed by rmap_walks and they must 115549b1b8d6SLorenzo Stoakes  * be correct for the whole merged range immediately after the 115649b1b8d6SLorenzo Stoakes  * rmap_locks are released. Otherwise if NNNN would be removed and 115749b1b8d6SLorenzo Stoakes  * CCCC would be extended over the NNNN range, remove_migration_ptes 115849b1b8d6SLorenzo Stoakes  * or other rmap walkers (if working on addresses beyond the "end" 115949b1b8d6SLorenzo Stoakes  * parameter) may establish ptes with the wrong permissions of CCCC 116049b1b8d6SLorenzo Stoakes  * instead of the right permissions of NNNN.
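 *
 * A purely illustrative instance of case 1: if PPPP spans [0x1000, 0x3000),
 * **** spans [0x3000, 0x4000) and NNNN spans [0x4000, 0x6000), and all three
 * share identical properties, the merge leaves a single VMA spanning
 * [0x1000, 0x6000): prev is expanded and next is removed.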
116149b1b8d6SLorenzo Stoakes * 116249b1b8d6SLorenzo Stoakes * In the code below: 116349b1b8d6SLorenzo Stoakes * PPPP is represented by *prev 116449b1b8d6SLorenzo Stoakes * CCCC is represented by *curr or not represented at all (NULL) 116549b1b8d6SLorenzo Stoakes * NNNN is represented by *next or not represented at all (NULL) 116649b1b8d6SLorenzo Stoakes * **** is not represented - it will be merged and the vma containing the 116749b1b8d6SLorenzo Stoakes * area is returned, or the function will return NULL 116849b1b8d6SLorenzo Stoakes */ 11692f1c6611SLorenzo Stoakes static struct vm_area_struct *vma_merge(struct vma_merge_struct *vmg) 117049b1b8d6SLorenzo Stoakes { 11712f1c6611SLorenzo Stoakes struct mm_struct *mm = vmg->mm; 11722f1c6611SLorenzo Stoakes struct vm_area_struct *prev = vmg->prev; 117349b1b8d6SLorenzo Stoakes struct vm_area_struct *curr, *next, *res; 117449b1b8d6SLorenzo Stoakes struct vm_area_struct *vma, *adjust, *remove, *remove2; 117549b1b8d6SLorenzo Stoakes struct vm_area_struct *anon_dup = NULL; 117649b1b8d6SLorenzo Stoakes struct vma_prepare vp; 117749b1b8d6SLorenzo Stoakes pgoff_t vma_pgoff; 117849b1b8d6SLorenzo Stoakes int err = 0; 117949b1b8d6SLorenzo Stoakes bool merge_prev = false; 118049b1b8d6SLorenzo Stoakes bool merge_next = false; 118149b1b8d6SLorenzo Stoakes bool vma_expanded = false; 11822f1c6611SLorenzo Stoakes unsigned long addr = vmg->start; 11832f1c6611SLorenzo Stoakes unsigned long end = vmg->end; 118449b1b8d6SLorenzo Stoakes unsigned long vma_start = addr; 118549b1b8d6SLorenzo Stoakes unsigned long vma_end = end; 11862f1c6611SLorenzo Stoakes pgoff_t pglen = PHYS_PFN(end - addr); 118749b1b8d6SLorenzo Stoakes long adj_start = 0; 118849b1b8d6SLorenzo Stoakes 1189*cacded5eSLorenzo Stoakes vmg->state = VMA_MERGE_NOMERGE; 1190*cacded5eSLorenzo Stoakes 119149b1b8d6SLorenzo Stoakes /* 119249b1b8d6SLorenzo Stoakes * We later require that vma->vm_flags == vm_flags, 119349b1b8d6SLorenzo Stoakes * so this tests vma->vm_flags & VM_SPECIAL, too. 119449b1b8d6SLorenzo Stoakes */ 11952f1c6611SLorenzo Stoakes if (vmg->flags & VM_SPECIAL) 119649b1b8d6SLorenzo Stoakes return NULL; 119749b1b8d6SLorenzo Stoakes 119849b1b8d6SLorenzo Stoakes /* Does the input range span an existing VMA? (cases 5 - 8) */ 119949b1b8d6SLorenzo Stoakes curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end); 120049b1b8d6SLorenzo Stoakes 120149b1b8d6SLorenzo Stoakes if (!curr || /* cases 1 - 4 */ 120249b1b8d6SLorenzo Stoakes end == curr->vm_end) /* cases 6 - 8, adjacent VMA */ 12032f1c6611SLorenzo Stoakes next = vmg->next = vma_lookup(mm, end); 120449b1b8d6SLorenzo Stoakes else 12052f1c6611SLorenzo Stoakes next = vmg->next = NULL; /* case 5 */ 120649b1b8d6SLorenzo Stoakes 120749b1b8d6SLorenzo Stoakes if (prev) { 120849b1b8d6SLorenzo Stoakes vma_start = prev->vm_start; 120949b1b8d6SLorenzo Stoakes vma_pgoff = prev->vm_pgoff; 121049b1b8d6SLorenzo Stoakes 121149b1b8d6SLorenzo Stoakes /* Can we merge the predecessor? */ 12123e01310dSLorenzo Stoakes if (addr == prev->vm_end && can_vma_merge_after(vmg)) { 121349b1b8d6SLorenzo Stoakes merge_prev = true; 12142f1c6611SLorenzo Stoakes vma_prev(vmg->vmi); 121549b1b8d6SLorenzo Stoakes } 121649b1b8d6SLorenzo Stoakes } 121749b1b8d6SLorenzo Stoakes 121849b1b8d6SLorenzo Stoakes /* Can we merge the successor? 
*/ 12193e01310dSLorenzo Stoakes if (next && can_vma_merge_before(vmg)) { 122049b1b8d6SLorenzo Stoakes merge_next = true; 122149b1b8d6SLorenzo Stoakes } 122249b1b8d6SLorenzo Stoakes 122349b1b8d6SLorenzo Stoakes /* Verify some invariant that must be enforced by the caller. */ 122449b1b8d6SLorenzo Stoakes VM_WARN_ON(prev && addr <= prev->vm_start); 122549b1b8d6SLorenzo Stoakes VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end)); 122649b1b8d6SLorenzo Stoakes VM_WARN_ON(addr >= end); 122749b1b8d6SLorenzo Stoakes 122849b1b8d6SLorenzo Stoakes if (!merge_prev && !merge_next) 122949b1b8d6SLorenzo Stoakes return NULL; /* Not mergeable. */ 123049b1b8d6SLorenzo Stoakes 123149b1b8d6SLorenzo Stoakes if (merge_prev) 123249b1b8d6SLorenzo Stoakes vma_start_write(prev); 123349b1b8d6SLorenzo Stoakes 123449b1b8d6SLorenzo Stoakes res = vma = prev; 123549b1b8d6SLorenzo Stoakes remove = remove2 = adjust = NULL; 123649b1b8d6SLorenzo Stoakes 123749b1b8d6SLorenzo Stoakes /* Can we merge both the predecessor and the successor? */ 123849b1b8d6SLorenzo Stoakes if (merge_prev && merge_next && 123949b1b8d6SLorenzo Stoakes is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) { 124049b1b8d6SLorenzo Stoakes vma_start_write(next); 124149b1b8d6SLorenzo Stoakes remove = next; /* case 1 */ 124249b1b8d6SLorenzo Stoakes vma_end = next->vm_end; 124349b1b8d6SLorenzo Stoakes err = dup_anon_vma(prev, next, &anon_dup); 124449b1b8d6SLorenzo Stoakes if (curr) { /* case 6 */ 124549b1b8d6SLorenzo Stoakes vma_start_write(curr); 124649b1b8d6SLorenzo Stoakes remove = curr; 124749b1b8d6SLorenzo Stoakes remove2 = next; 124849b1b8d6SLorenzo Stoakes /* 124949b1b8d6SLorenzo Stoakes * Note that the dup_anon_vma below cannot overwrite err 125049b1b8d6SLorenzo Stoakes * since the first caller would do nothing unless next 125149b1b8d6SLorenzo Stoakes * has an anon_vma. 
125249b1b8d6SLorenzo Stoakes */ 125349b1b8d6SLorenzo Stoakes if (!next->anon_vma) 125449b1b8d6SLorenzo Stoakes err = dup_anon_vma(prev, curr, &anon_dup); 125549b1b8d6SLorenzo Stoakes } 125649b1b8d6SLorenzo Stoakes } else if (merge_prev) { /* case 2 */ 125749b1b8d6SLorenzo Stoakes if (curr) { 125849b1b8d6SLorenzo Stoakes vma_start_write(curr); 125949b1b8d6SLorenzo Stoakes if (end == curr->vm_end) { /* case 7 */ 126049b1b8d6SLorenzo Stoakes /* 126149b1b8d6SLorenzo Stoakes * can_vma_merge_after() assumed we would not be 126249b1b8d6SLorenzo Stoakes * removing prev vma, so it skipped the check 126349b1b8d6SLorenzo Stoakes * for vm_ops->close, but we are removing curr 126449b1b8d6SLorenzo Stoakes */ 126549b1b8d6SLorenzo Stoakes if (curr->vm_ops && curr->vm_ops->close) 126649b1b8d6SLorenzo Stoakes err = -EINVAL; 126749b1b8d6SLorenzo Stoakes remove = curr; 126849b1b8d6SLorenzo Stoakes } else { /* case 5 */ 126949b1b8d6SLorenzo Stoakes adjust = curr; 127049b1b8d6SLorenzo Stoakes adj_start = (end - curr->vm_start); 127149b1b8d6SLorenzo Stoakes } 127249b1b8d6SLorenzo Stoakes if (!err) 127349b1b8d6SLorenzo Stoakes err = dup_anon_vma(prev, curr, &anon_dup); 127449b1b8d6SLorenzo Stoakes } 127549b1b8d6SLorenzo Stoakes } else { /* merge_next */ 127649b1b8d6SLorenzo Stoakes vma_start_write(next); 127749b1b8d6SLorenzo Stoakes res = next; 127849b1b8d6SLorenzo Stoakes if (prev && addr < prev->vm_end) { /* case 4 */ 127949b1b8d6SLorenzo Stoakes vma_start_write(prev); 128049b1b8d6SLorenzo Stoakes vma_end = addr; 128149b1b8d6SLorenzo Stoakes adjust = next; 128249b1b8d6SLorenzo Stoakes adj_start = -(prev->vm_end - addr); 128349b1b8d6SLorenzo Stoakes err = dup_anon_vma(next, prev, &anon_dup); 128449b1b8d6SLorenzo Stoakes } else { 128549b1b8d6SLorenzo Stoakes /* 128649b1b8d6SLorenzo Stoakes * Note that cases 3 and 8 are the ONLY ones where prev 128749b1b8d6SLorenzo Stoakes * is permitted to be (but is not necessarily) NULL. 128849b1b8d6SLorenzo Stoakes */ 128949b1b8d6SLorenzo Stoakes vma = next; /* case 3 */ 129049b1b8d6SLorenzo Stoakes vma_start = addr; 129149b1b8d6SLorenzo Stoakes vma_end = next->vm_end; 129249b1b8d6SLorenzo Stoakes vma_pgoff = next->vm_pgoff - pglen; 129349b1b8d6SLorenzo Stoakes if (curr) { /* case 8 */ 129449b1b8d6SLorenzo Stoakes vma_pgoff = curr->vm_pgoff; 129549b1b8d6SLorenzo Stoakes vma_start_write(curr); 129649b1b8d6SLorenzo Stoakes remove = curr; 129749b1b8d6SLorenzo Stoakes err = dup_anon_vma(next, curr, &anon_dup); 129849b1b8d6SLorenzo Stoakes } 129949b1b8d6SLorenzo Stoakes } 130049b1b8d6SLorenzo Stoakes } 130149b1b8d6SLorenzo Stoakes 130249b1b8d6SLorenzo Stoakes /* Error in anon_vma clone. 
*/ 130349b1b8d6SLorenzo Stoakes if (err) 130449b1b8d6SLorenzo Stoakes goto anon_vma_fail; 130549b1b8d6SLorenzo Stoakes 130649b1b8d6SLorenzo Stoakes if (vma_start < vma->vm_start || vma_end > vma->vm_end) 130749b1b8d6SLorenzo Stoakes vma_expanded = true; 130849b1b8d6SLorenzo Stoakes 130949b1b8d6SLorenzo Stoakes if (vma_expanded) { 13102f1c6611SLorenzo Stoakes vma_iter_config(vmg->vmi, vma_start, vma_end); 131149b1b8d6SLorenzo Stoakes } else { 13122f1c6611SLorenzo Stoakes vma_iter_config(vmg->vmi, adjust->vm_start + adj_start, 131349b1b8d6SLorenzo Stoakes adjust->vm_end); 131449b1b8d6SLorenzo Stoakes } 131549b1b8d6SLorenzo Stoakes 13162f1c6611SLorenzo Stoakes if (vma_iter_prealloc(vmg->vmi, vma)) 131749b1b8d6SLorenzo Stoakes goto prealloc_fail; 131849b1b8d6SLorenzo Stoakes 131949b1b8d6SLorenzo Stoakes init_multi_vma_prep(&vp, vma, adjust, remove, remove2); 132049b1b8d6SLorenzo Stoakes VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma && 132149b1b8d6SLorenzo Stoakes vp.anon_vma != adjust->anon_vma); 132249b1b8d6SLorenzo Stoakes 132349b1b8d6SLorenzo Stoakes vma_prepare(&vp); 132449b1b8d6SLorenzo Stoakes vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start); 132549b1b8d6SLorenzo Stoakes vma_set_range(vma, vma_start, vma_end, vma_pgoff); 132649b1b8d6SLorenzo Stoakes 132749b1b8d6SLorenzo Stoakes if (vma_expanded) 13282f1c6611SLorenzo Stoakes vma_iter_store(vmg->vmi, vma); 132949b1b8d6SLorenzo Stoakes 133049b1b8d6SLorenzo Stoakes if (adj_start) { 133149b1b8d6SLorenzo Stoakes adjust->vm_start += adj_start; 133249b1b8d6SLorenzo Stoakes adjust->vm_pgoff += adj_start >> PAGE_SHIFT; 133349b1b8d6SLorenzo Stoakes if (adj_start < 0) { 133449b1b8d6SLorenzo Stoakes WARN_ON(vma_expanded); 13352f1c6611SLorenzo Stoakes vma_iter_store(vmg->vmi, next); 133649b1b8d6SLorenzo Stoakes } 133749b1b8d6SLorenzo Stoakes } 133849b1b8d6SLorenzo Stoakes 13392f1c6611SLorenzo Stoakes vma_complete(&vp, vmg->vmi, mm); 134089b2d2a5SLiam R. Howlett validate_mm(mm); 13412f1c6611SLorenzo Stoakes khugepaged_enter_vma(res, vmg->flags); 1342*cacded5eSLorenzo Stoakes 1343*cacded5eSLorenzo Stoakes vmg->state = VMA_MERGE_SUCCESS; 134449b1b8d6SLorenzo Stoakes return res; 134549b1b8d6SLorenzo Stoakes 134649b1b8d6SLorenzo Stoakes prealloc_fail: 1347*cacded5eSLorenzo Stoakes vmg->state = VMA_MERGE_ERROR_NOMEM; 134849b1b8d6SLorenzo Stoakes if (anon_dup) 134949b1b8d6SLorenzo Stoakes unlink_anon_vmas(anon_dup); 135049b1b8d6SLorenzo Stoakes 135149b1b8d6SLorenzo Stoakes anon_vma_fail: 1352*cacded5eSLorenzo Stoakes if (err == -ENOMEM) 1353*cacded5eSLorenzo Stoakes vmg->state = VMA_MERGE_ERROR_NOMEM; 1354*cacded5eSLorenzo Stoakes 13552f1c6611SLorenzo Stoakes vma_iter_set(vmg->vmi, addr); 13562f1c6611SLorenzo Stoakes vma_iter_load(vmg->vmi); 135749b1b8d6SLorenzo Stoakes return NULL; 135849b1b8d6SLorenzo Stoakes } 135949b1b8d6SLorenzo Stoakes 136049b1b8d6SLorenzo Stoakes /* 136149b1b8d6SLorenzo Stoakes * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd 136249b1b8d6SLorenzo Stoakes * context and anonymous VMA name within the range [start, end). 136349b1b8d6SLorenzo Stoakes * 136449b1b8d6SLorenzo Stoakes * As a result, we might be able to merge the newly modified VMA range with an 136549b1b8d6SLorenzo Stoakes * adjacent VMA with identical properties. 136649b1b8d6SLorenzo Stoakes * 136749b1b8d6SLorenzo Stoakes * If no merge is possible and the range does not span the entirety of the VMA, 136849b1b8d6SLorenzo Stoakes * we then need to split the VMA to accommodate the change. 
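 *
 * For example (illustration only): changing flags on a range that covers only
 * the middle of an existing VMA, with no mergeable neighbour, triggers two
 * split_vma() calls, so three VMAs remain and the middle one is returned for
 * the caller to modify.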
136949b1b8d6SLorenzo Stoakes * 137049b1b8d6SLorenzo Stoakes * The function returns either the merged VMA, the original VMA if a split was 137149b1b8d6SLorenzo Stoakes * required instead, or an error if the split failed. 137249b1b8d6SLorenzo Stoakes */ 13732f1c6611SLorenzo Stoakes static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg) 137449b1b8d6SLorenzo Stoakes { 13752f1c6611SLorenzo Stoakes struct vm_area_struct *vma = vmg->vma; 137649b1b8d6SLorenzo Stoakes struct vm_area_struct *merged; 137749b1b8d6SLorenzo Stoakes 13782f1c6611SLorenzo Stoakes /* First, try to merge. */ 13792f1c6611SLorenzo Stoakes merged = vma_merge(vmg); 138049b1b8d6SLorenzo Stoakes if (merged) 138149b1b8d6SLorenzo Stoakes return merged; 138249b1b8d6SLorenzo Stoakes 13832f1c6611SLorenzo Stoakes /* Split any preceding portion of the VMA. */ 13842f1c6611SLorenzo Stoakes if (vma->vm_start < vmg->start) { 13852f1c6611SLorenzo Stoakes int err = split_vma(vmg->vmi, vma, vmg->start, 1); 138649b1b8d6SLorenzo Stoakes 138749b1b8d6SLorenzo Stoakes if (err) 138849b1b8d6SLorenzo Stoakes return ERR_PTR(err); 138949b1b8d6SLorenzo Stoakes } 139049b1b8d6SLorenzo Stoakes 13912f1c6611SLorenzo Stoakes /* Split any trailing portion of the VMA. */ 13922f1c6611SLorenzo Stoakes if (vma->vm_end > vmg->end) { 13932f1c6611SLorenzo Stoakes int err = split_vma(vmg->vmi, vma, vmg->end, 0); 139449b1b8d6SLorenzo Stoakes 139549b1b8d6SLorenzo Stoakes if (err) 139649b1b8d6SLorenzo Stoakes return ERR_PTR(err); 139749b1b8d6SLorenzo Stoakes } 139849b1b8d6SLorenzo Stoakes 139949b1b8d6SLorenzo Stoakes return vma; 140049b1b8d6SLorenzo Stoakes } 140149b1b8d6SLorenzo Stoakes 14022f1c6611SLorenzo Stoakes struct vm_area_struct *vma_modify_flags( 14032f1c6611SLorenzo Stoakes struct vma_iterator *vmi, struct vm_area_struct *prev, 14042f1c6611SLorenzo Stoakes struct vm_area_struct *vma, unsigned long start, unsigned long end, 14052f1c6611SLorenzo Stoakes unsigned long new_flags) 14062f1c6611SLorenzo Stoakes { 14072f1c6611SLorenzo Stoakes VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); 14082f1c6611SLorenzo Stoakes 14092f1c6611SLorenzo Stoakes vmg.flags = new_flags; 14102f1c6611SLorenzo Stoakes 14112f1c6611SLorenzo Stoakes return vma_modify(&vmg); 14122f1c6611SLorenzo Stoakes } 14132f1c6611SLorenzo Stoakes 14142f1c6611SLorenzo Stoakes struct vm_area_struct 14152f1c6611SLorenzo Stoakes *vma_modify_flags_name(struct vma_iterator *vmi, 14162f1c6611SLorenzo Stoakes struct vm_area_struct *prev, 14172f1c6611SLorenzo Stoakes struct vm_area_struct *vma, 14182f1c6611SLorenzo Stoakes unsigned long start, 14192f1c6611SLorenzo Stoakes unsigned long end, 14202f1c6611SLorenzo Stoakes unsigned long new_flags, 14212f1c6611SLorenzo Stoakes struct anon_vma_name *new_name) 14222f1c6611SLorenzo Stoakes { 14232f1c6611SLorenzo Stoakes VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); 14242f1c6611SLorenzo Stoakes 14252f1c6611SLorenzo Stoakes vmg.flags = new_flags; 14262f1c6611SLorenzo Stoakes vmg.anon_name = new_name; 14272f1c6611SLorenzo Stoakes 14282f1c6611SLorenzo Stoakes return vma_modify(&vmg); 14292f1c6611SLorenzo Stoakes } 14302f1c6611SLorenzo Stoakes 14312f1c6611SLorenzo Stoakes struct vm_area_struct 14322f1c6611SLorenzo Stoakes *vma_modify_policy(struct vma_iterator *vmi, 14332f1c6611SLorenzo Stoakes struct vm_area_struct *prev, 14342f1c6611SLorenzo Stoakes struct vm_area_struct *vma, 14352f1c6611SLorenzo Stoakes unsigned long start, unsigned long end, 14362f1c6611SLorenzo Stoakes struct mempolicy *new_pol) 14372f1c6611SLorenzo Stoakes { 14382f1c6611SLorenzo Stoakes 
VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); 14392f1c6611SLorenzo Stoakes 14402f1c6611SLorenzo Stoakes vmg.policy = new_pol; 14412f1c6611SLorenzo Stoakes 14422f1c6611SLorenzo Stoakes return vma_modify(&vmg); 14432f1c6611SLorenzo Stoakes } 14442f1c6611SLorenzo Stoakes 14452f1c6611SLorenzo Stoakes struct vm_area_struct 14462f1c6611SLorenzo Stoakes *vma_modify_flags_uffd(struct vma_iterator *vmi, 14472f1c6611SLorenzo Stoakes struct vm_area_struct *prev, 14482f1c6611SLorenzo Stoakes struct vm_area_struct *vma, 14492f1c6611SLorenzo Stoakes unsigned long start, unsigned long end, 14502f1c6611SLorenzo Stoakes unsigned long new_flags, 14512f1c6611SLorenzo Stoakes struct vm_userfaultfd_ctx new_ctx) 14522f1c6611SLorenzo Stoakes { 14532f1c6611SLorenzo Stoakes VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); 14542f1c6611SLorenzo Stoakes 14552f1c6611SLorenzo Stoakes vmg.flags = new_flags; 14562f1c6611SLorenzo Stoakes vmg.uffd_ctx = new_ctx; 14572f1c6611SLorenzo Stoakes 14582f1c6611SLorenzo Stoakes return vma_modify(&vmg); 14592f1c6611SLorenzo Stoakes } 14602f1c6611SLorenzo Stoakes 146149b1b8d6SLorenzo Stoakes /* 146249b1b8d6SLorenzo Stoakes * Expand vma by delta bytes, potentially merging with an immediately adjacent 146349b1b8d6SLorenzo Stoakes * VMA with identical properties. 146449b1b8d6SLorenzo Stoakes */ 146549b1b8d6SLorenzo Stoakes struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi, 146649b1b8d6SLorenzo Stoakes struct vm_area_struct *vma, 146749b1b8d6SLorenzo Stoakes unsigned long delta) 146849b1b8d6SLorenzo Stoakes { 14692f1c6611SLorenzo Stoakes VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta); 147049b1b8d6SLorenzo Stoakes 1471*cacded5eSLorenzo Stoakes vmg.next = vma_iter_next_rewind(vmi, NULL); 1472*cacded5eSLorenzo Stoakes vmg.vma = NULL; /* We use the VMA to populate VMG fields only. 
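 * Descriptive note: VMG_VMA_STATE() above makes @vma act as the
 * predecessor and sets the candidate range to
 * [vma->vm_end, vma->vm_end + delta), so clearing vmg.vma lets
 * vma_merge_new_range() treat this as a brand-new range to merge.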
*/ 1473*cacded5eSLorenzo Stoakes 1474*cacded5eSLorenzo Stoakes return vma_merge_new_range(&vmg); 147549b1b8d6SLorenzo Stoakes } 147649b1b8d6SLorenzo Stoakes 147749b1b8d6SLorenzo Stoakes void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb) 147849b1b8d6SLorenzo Stoakes { 147949b1b8d6SLorenzo Stoakes vb->count = 0; 148049b1b8d6SLorenzo Stoakes } 148149b1b8d6SLorenzo Stoakes 148249b1b8d6SLorenzo Stoakes static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb) 148349b1b8d6SLorenzo Stoakes { 148449b1b8d6SLorenzo Stoakes struct address_space *mapping; 148549b1b8d6SLorenzo Stoakes int i; 148649b1b8d6SLorenzo Stoakes 148749b1b8d6SLorenzo Stoakes mapping = vb->vmas[0]->vm_file->f_mapping; 148849b1b8d6SLorenzo Stoakes i_mmap_lock_write(mapping); 148949b1b8d6SLorenzo Stoakes for (i = 0; i < vb->count; i++) { 149049b1b8d6SLorenzo Stoakes VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping); 149149b1b8d6SLorenzo Stoakes __remove_shared_vm_struct(vb->vmas[i], mapping); 149249b1b8d6SLorenzo Stoakes } 149349b1b8d6SLorenzo Stoakes i_mmap_unlock_write(mapping); 149449b1b8d6SLorenzo Stoakes 149549b1b8d6SLorenzo Stoakes unlink_file_vma_batch_init(vb); 149649b1b8d6SLorenzo Stoakes } 149749b1b8d6SLorenzo Stoakes 149849b1b8d6SLorenzo Stoakes void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb, 149949b1b8d6SLorenzo Stoakes struct vm_area_struct *vma) 150049b1b8d6SLorenzo Stoakes { 150149b1b8d6SLorenzo Stoakes if (vma->vm_file == NULL) 150249b1b8d6SLorenzo Stoakes return; 150349b1b8d6SLorenzo Stoakes 150449b1b8d6SLorenzo Stoakes if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) || 150549b1b8d6SLorenzo Stoakes vb->count == ARRAY_SIZE(vb->vmas)) 150649b1b8d6SLorenzo Stoakes unlink_file_vma_batch_process(vb); 150749b1b8d6SLorenzo Stoakes 150849b1b8d6SLorenzo Stoakes vb->vmas[vb->count] = vma; 150949b1b8d6SLorenzo Stoakes vb->count++; 151049b1b8d6SLorenzo Stoakes } 151149b1b8d6SLorenzo Stoakes 151249b1b8d6SLorenzo Stoakes void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb) 151349b1b8d6SLorenzo Stoakes { 151449b1b8d6SLorenzo Stoakes if (vb->count > 0) 151549b1b8d6SLorenzo Stoakes unlink_file_vma_batch_process(vb); 151649b1b8d6SLorenzo Stoakes } 151749b1b8d6SLorenzo Stoakes 151849b1b8d6SLorenzo Stoakes /* 151949b1b8d6SLorenzo Stoakes * Unlink a file-based vm structure from its interval tree, to hide 152049b1b8d6SLorenzo Stoakes * vma from rmap and vmtruncate before freeing its page tables. 
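 *
 * The unlink_file_vma_batch_*() helpers above provide a batched variant that
 * takes each mapping's i_mmap_rwsem once per flushed batch rather than once
 * per VMA. An illustrative usage sketch (not taken from a real caller;
 * iterator setup omitted):
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for_each_vma(vmi, vma)
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);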
152149b1b8d6SLorenzo Stoakes */ 152249b1b8d6SLorenzo Stoakes void unlink_file_vma(struct vm_area_struct *vma) 152349b1b8d6SLorenzo Stoakes { 152449b1b8d6SLorenzo Stoakes struct file *file = vma->vm_file; 152549b1b8d6SLorenzo Stoakes 152649b1b8d6SLorenzo Stoakes if (file) { 152749b1b8d6SLorenzo Stoakes struct address_space *mapping = file->f_mapping; 152849b1b8d6SLorenzo Stoakes 152949b1b8d6SLorenzo Stoakes i_mmap_lock_write(mapping); 153049b1b8d6SLorenzo Stoakes __remove_shared_vm_struct(vma, mapping); 153149b1b8d6SLorenzo Stoakes i_mmap_unlock_write(mapping); 153249b1b8d6SLorenzo Stoakes } 153349b1b8d6SLorenzo Stoakes } 153449b1b8d6SLorenzo Stoakes 153549b1b8d6SLorenzo Stoakes void vma_link_file(struct vm_area_struct *vma) 153649b1b8d6SLorenzo Stoakes { 153749b1b8d6SLorenzo Stoakes struct file *file = vma->vm_file; 153849b1b8d6SLorenzo Stoakes struct address_space *mapping; 153949b1b8d6SLorenzo Stoakes 154049b1b8d6SLorenzo Stoakes if (file) { 154149b1b8d6SLorenzo Stoakes mapping = file->f_mapping; 154249b1b8d6SLorenzo Stoakes i_mmap_lock_write(mapping); 154349b1b8d6SLorenzo Stoakes __vma_link_file(vma, mapping); 154449b1b8d6SLorenzo Stoakes i_mmap_unlock_write(mapping); 154549b1b8d6SLorenzo Stoakes } 154649b1b8d6SLorenzo Stoakes } 154749b1b8d6SLorenzo Stoakes 154849b1b8d6SLorenzo Stoakes int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) 154949b1b8d6SLorenzo Stoakes { 155049b1b8d6SLorenzo Stoakes VMA_ITERATOR(vmi, mm, 0); 155149b1b8d6SLorenzo Stoakes 155249b1b8d6SLorenzo Stoakes vma_iter_config(&vmi, vma->vm_start, vma->vm_end); 155349b1b8d6SLorenzo Stoakes if (vma_iter_prealloc(&vmi, vma)) 155449b1b8d6SLorenzo Stoakes return -ENOMEM; 155549b1b8d6SLorenzo Stoakes 155649b1b8d6SLorenzo Stoakes vma_start_write(vma); 155749b1b8d6SLorenzo Stoakes vma_iter_store(&vmi, vma); 155849b1b8d6SLorenzo Stoakes vma_link_file(vma); 155949b1b8d6SLorenzo Stoakes mm->map_count++; 156049b1b8d6SLorenzo Stoakes validate_mm(mm); 156149b1b8d6SLorenzo Stoakes return 0; 156249b1b8d6SLorenzo Stoakes } 156349b1b8d6SLorenzo Stoakes 156449b1b8d6SLorenzo Stoakes /* 156549b1b8d6SLorenzo Stoakes * Copy the vma structure to a new location in the same mm, 156649b1b8d6SLorenzo Stoakes * prior to moving page table entries, to effect an mremap move. 156749b1b8d6SLorenzo Stoakes */ 156849b1b8d6SLorenzo Stoakes struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, 156949b1b8d6SLorenzo Stoakes unsigned long addr, unsigned long len, pgoff_t pgoff, 157049b1b8d6SLorenzo Stoakes bool *need_rmap_locks) 157149b1b8d6SLorenzo Stoakes { 157249b1b8d6SLorenzo Stoakes struct vm_area_struct *vma = *vmap; 157349b1b8d6SLorenzo Stoakes unsigned long vma_start = vma->vm_start; 157449b1b8d6SLorenzo Stoakes struct mm_struct *mm = vma->vm_mm; 1575*cacded5eSLorenzo Stoakes struct vm_area_struct *new_vma; 157649b1b8d6SLorenzo Stoakes bool faulted_in_anon_vma = true; 157749b1b8d6SLorenzo Stoakes VMA_ITERATOR(vmi, mm, addr); 1578*cacded5eSLorenzo Stoakes VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len); 157949b1b8d6SLorenzo Stoakes 158049b1b8d6SLorenzo Stoakes /* 158149b1b8d6SLorenzo Stoakes * If anonymous vma has not yet been faulted, update new pgoff 158249b1b8d6SLorenzo Stoakes * to match new location, to increase its chance of merging. 
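 * For example (illustration only): an anonymous VMA that has never been
 * faulted carries no anon_vma and maps no pages, so its vm_pgoff can simply
 * be rebased to addr >> PAGE_SHIFT below, which makes a merge with a
 * neighbour at the destination address possible.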
158349b1b8d6SLorenzo Stoakes */ 158449b1b8d6SLorenzo Stoakes if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { 158549b1b8d6SLorenzo Stoakes pgoff = addr >> PAGE_SHIFT; 158649b1b8d6SLorenzo Stoakes faulted_in_anon_vma = false; 158749b1b8d6SLorenzo Stoakes } 158849b1b8d6SLorenzo Stoakes 1589*cacded5eSLorenzo Stoakes new_vma = find_vma_prev(mm, addr, &vmg.prev); 159049b1b8d6SLorenzo Stoakes if (new_vma && new_vma->vm_start < addr + len) 159149b1b8d6SLorenzo Stoakes return NULL; /* should never get here */ 159249b1b8d6SLorenzo Stoakes 1593*cacded5eSLorenzo Stoakes vmg.vma = NULL; /* New VMA range. */ 1594*cacded5eSLorenzo Stoakes vmg.pgoff = pgoff; 1595*cacded5eSLorenzo Stoakes vmg.next = vma_iter_next_rewind(&vmi, NULL); 1596*cacded5eSLorenzo Stoakes new_vma = vma_merge_new_range(&vmg); 1597*cacded5eSLorenzo Stoakes 159849b1b8d6SLorenzo Stoakes if (new_vma) { 159949b1b8d6SLorenzo Stoakes /* 160049b1b8d6SLorenzo Stoakes * Source vma may have been merged into new_vma 160149b1b8d6SLorenzo Stoakes */ 160249b1b8d6SLorenzo Stoakes if (unlikely(vma_start >= new_vma->vm_start && 160349b1b8d6SLorenzo Stoakes vma_start < new_vma->vm_end)) { 160449b1b8d6SLorenzo Stoakes /* 160549b1b8d6SLorenzo Stoakes * The only way we can get a vma_merge with 160649b1b8d6SLorenzo Stoakes * self during an mremap is if the vma hasn't 160749b1b8d6SLorenzo Stoakes * been faulted in yet and we were allowed to 160849b1b8d6SLorenzo Stoakes * reset the dst vma->vm_pgoff to the 160949b1b8d6SLorenzo Stoakes * destination address of the mremap to allow 161049b1b8d6SLorenzo Stoakes * the merge to happen. mremap must change the 161149b1b8d6SLorenzo Stoakes * vm_pgoff linearity between src and dst vmas 161249b1b8d6SLorenzo Stoakes * (in turn preventing a vma_merge) to be 161349b1b8d6SLorenzo Stoakes * safe. It is only safe to keep the vm_pgoff 161449b1b8d6SLorenzo Stoakes * linear if there are no pages mapped yet. 
161549b1b8d6SLorenzo Stoakes */ 161649b1b8d6SLorenzo Stoakes VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); 161749b1b8d6SLorenzo Stoakes *vmap = vma = new_vma; 161849b1b8d6SLorenzo Stoakes } 161949b1b8d6SLorenzo Stoakes *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); 162049b1b8d6SLorenzo Stoakes } else { 162149b1b8d6SLorenzo Stoakes new_vma = vm_area_dup(vma); 162249b1b8d6SLorenzo Stoakes if (!new_vma) 162349b1b8d6SLorenzo Stoakes goto out; 162449b1b8d6SLorenzo Stoakes vma_set_range(new_vma, addr, addr + len, pgoff); 162549b1b8d6SLorenzo Stoakes if (vma_dup_policy(vma, new_vma)) 162649b1b8d6SLorenzo Stoakes goto out_free_vma; 162749b1b8d6SLorenzo Stoakes if (anon_vma_clone(new_vma, vma)) 162849b1b8d6SLorenzo Stoakes goto out_free_mempol; 162949b1b8d6SLorenzo Stoakes if (new_vma->vm_file) 163049b1b8d6SLorenzo Stoakes get_file(new_vma->vm_file); 163149b1b8d6SLorenzo Stoakes if (new_vma->vm_ops && new_vma->vm_ops->open) 163249b1b8d6SLorenzo Stoakes new_vma->vm_ops->open(new_vma); 163349b1b8d6SLorenzo Stoakes if (vma_link(mm, new_vma)) 163449b1b8d6SLorenzo Stoakes goto out_vma_link; 163549b1b8d6SLorenzo Stoakes *need_rmap_locks = false; 163649b1b8d6SLorenzo Stoakes } 163749b1b8d6SLorenzo Stoakes return new_vma; 163849b1b8d6SLorenzo Stoakes 163949b1b8d6SLorenzo Stoakes out_vma_link: 164049b1b8d6SLorenzo Stoakes if (new_vma->vm_ops && new_vma->vm_ops->close) 164149b1b8d6SLorenzo Stoakes new_vma->vm_ops->close(new_vma); 164249b1b8d6SLorenzo Stoakes 164349b1b8d6SLorenzo Stoakes if (new_vma->vm_file) 164449b1b8d6SLorenzo Stoakes fput(new_vma->vm_file); 164549b1b8d6SLorenzo Stoakes 164649b1b8d6SLorenzo Stoakes unlink_anon_vmas(new_vma); 164749b1b8d6SLorenzo Stoakes out_free_mempol: 164849b1b8d6SLorenzo Stoakes mpol_put(vma_policy(new_vma)); 164949b1b8d6SLorenzo Stoakes out_free_vma: 165049b1b8d6SLorenzo Stoakes vm_area_free(new_vma); 165149b1b8d6SLorenzo Stoakes out: 165249b1b8d6SLorenzo Stoakes return NULL; 165349b1b8d6SLorenzo Stoakes } 165449b1b8d6SLorenzo Stoakes 165549b1b8d6SLorenzo Stoakes /* 165649b1b8d6SLorenzo Stoakes * Rough compatibility check to quickly see if it's even worth looking 165749b1b8d6SLorenzo Stoakes * at sharing an anon_vma. 165849b1b8d6SLorenzo Stoakes * 165949b1b8d6SLorenzo Stoakes * They need to have the same vm_file, and the flags can only differ 166049b1b8d6SLorenzo Stoakes * in things that mprotect may change. 166149b1b8d6SLorenzo Stoakes * 166249b1b8d6SLorenzo Stoakes * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that 166349b1b8d6SLorenzo Stoakes * we can merge the two vma's. For example, we refuse to merge a vma if 166449b1b8d6SLorenzo Stoakes * there is a vm_ops->close() function, because that indicates that the 166549b1b8d6SLorenzo Stoakes * driver is doing some kind of reference counting. But that doesn't 166649b1b8d6SLorenzo Stoakes * really matter for the anon_vma sharing case. 
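 *
 * Illustrative example: two adjacent anonymous VMAs that differ only in
 * VM_READ/VM_WRITE/VM_EXEC (precisely the bits mprotect may change) still
 * pass this check and may end up sharing an anon_vma.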
166749b1b8d6SLorenzo Stoakes */ 166849b1b8d6SLorenzo Stoakes static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) 166949b1b8d6SLorenzo Stoakes { 167049b1b8d6SLorenzo Stoakes return a->vm_end == b->vm_start && 167149b1b8d6SLorenzo Stoakes mpol_equal(vma_policy(a), vma_policy(b)) && 167249b1b8d6SLorenzo Stoakes a->vm_file == b->vm_file && 167349b1b8d6SLorenzo Stoakes !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) && 167449b1b8d6SLorenzo Stoakes b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); 167549b1b8d6SLorenzo Stoakes } 167649b1b8d6SLorenzo Stoakes 167749b1b8d6SLorenzo Stoakes /* 167849b1b8d6SLorenzo Stoakes * Do some basic sanity checking to see if we can re-use the anon_vma 167949b1b8d6SLorenzo Stoakes * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be 168049b1b8d6SLorenzo Stoakes * the same as 'old', the other will be the new one that is trying 168149b1b8d6SLorenzo Stoakes * to share the anon_vma. 168249b1b8d6SLorenzo Stoakes * 168349b1b8d6SLorenzo Stoakes * NOTE! This runs with mmap_lock held for reading, so it is possible that 168449b1b8d6SLorenzo Stoakes * the anon_vma of 'old' is concurrently in the process of being set up 168549b1b8d6SLorenzo Stoakes * by another page fault trying to merge _that_. But that's ok: if it 168649b1b8d6SLorenzo Stoakes * is being set up, that automatically means that it will be a singleton 168749b1b8d6SLorenzo Stoakes * acceptable for merging, so we can do all of this optimistically. But 168849b1b8d6SLorenzo Stoakes * we do that READ_ONCE() to make sure that we never re-load the pointer. 168949b1b8d6SLorenzo Stoakes * 169049b1b8d6SLorenzo Stoakes * IOW: that the "list_is_singular()" test on the anon_vma_chain only 169149b1b8d6SLorenzo Stoakes * matters for the 'stable anon_vma' case (ie the thing we want to avoid 169249b1b8d6SLorenzo Stoakes * is to return an anon_vma that is "complex" due to having gone through 169349b1b8d6SLorenzo Stoakes * a fork). 169449b1b8d6SLorenzo Stoakes * 169549b1b8d6SLorenzo Stoakes * We also make sure that the two vma's are compatible (adjacent, 169649b1b8d6SLorenzo Stoakes * and with the same memory policies). That's all stable, even with just 169749b1b8d6SLorenzo Stoakes * a read lock on the mmap_lock. 169849b1b8d6SLorenzo Stoakes */ 169949b1b8d6SLorenzo Stoakes static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, 170049b1b8d6SLorenzo Stoakes struct vm_area_struct *a, 170149b1b8d6SLorenzo Stoakes struct vm_area_struct *b) 170249b1b8d6SLorenzo Stoakes { 170349b1b8d6SLorenzo Stoakes if (anon_vma_compatible(a, b)) { 170449b1b8d6SLorenzo Stoakes struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); 170549b1b8d6SLorenzo Stoakes 170649b1b8d6SLorenzo Stoakes if (anon_vma && list_is_singular(&old->anon_vma_chain)) 170749b1b8d6SLorenzo Stoakes return anon_vma; 170849b1b8d6SLorenzo Stoakes } 170949b1b8d6SLorenzo Stoakes return NULL; 171049b1b8d6SLorenzo Stoakes } 171149b1b8d6SLorenzo Stoakes 171249b1b8d6SLorenzo Stoakes /* 171349b1b8d6SLorenzo Stoakes * find_mergeable_anon_vma is used by anon_vma_prepare, to check 171449b1b8d6SLorenzo Stoakes * neighbouring vmas for a suitable anon_vma, before it goes off 171549b1b8d6SLorenzo Stoakes * to allocate a new anon_vma. It checks because a repetitive 171649b1b8d6SLorenzo Stoakes * sequence of mprotects and faults may otherwise lead to distinct 171749b1b8d6SLorenzo Stoakes * anon_vmas being allocated, preventing vma merge in subsequent 171849b1b8d6SLorenzo Stoakes * mprotect. 
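 *
 * For example (illustration only): faulting and then mprotect()ing a large
 * anonymous mapping one page at a time could otherwise leave every page-sized
 * VMA with its own anon_vma, defeating any later remerging.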
171949b1b8d6SLorenzo Stoakes */ 172049b1b8d6SLorenzo Stoakes struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) 172149b1b8d6SLorenzo Stoakes { 172249b1b8d6SLorenzo Stoakes struct anon_vma *anon_vma = NULL; 172349b1b8d6SLorenzo Stoakes struct vm_area_struct *prev, *next; 172449b1b8d6SLorenzo Stoakes VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end); 172549b1b8d6SLorenzo Stoakes 172649b1b8d6SLorenzo Stoakes /* Try next first. */ 172749b1b8d6SLorenzo Stoakes next = vma_iter_load(&vmi); 172849b1b8d6SLorenzo Stoakes if (next) { 172949b1b8d6SLorenzo Stoakes anon_vma = reusable_anon_vma(next, vma, next); 173049b1b8d6SLorenzo Stoakes if (anon_vma) 173149b1b8d6SLorenzo Stoakes return anon_vma; 173249b1b8d6SLorenzo Stoakes } 173349b1b8d6SLorenzo Stoakes 173449b1b8d6SLorenzo Stoakes prev = vma_prev(&vmi); 173549b1b8d6SLorenzo Stoakes VM_BUG_ON_VMA(prev != vma, vma); 173649b1b8d6SLorenzo Stoakes prev = vma_prev(&vmi); 173749b1b8d6SLorenzo Stoakes /* Try prev next. */ 173849b1b8d6SLorenzo Stoakes if (prev) 173949b1b8d6SLorenzo Stoakes anon_vma = reusable_anon_vma(prev, prev, vma); 174049b1b8d6SLorenzo Stoakes 174149b1b8d6SLorenzo Stoakes /* 174249b1b8d6SLorenzo Stoakes * We might reach here with anon_vma == NULL if we can't find 174349b1b8d6SLorenzo Stoakes * any reusable anon_vma. 174449b1b8d6SLorenzo Stoakes * There's no absolute need to look only at touching neighbours: 174549b1b8d6SLorenzo Stoakes * we could search further afield for "compatible" anon_vmas. 174649b1b8d6SLorenzo Stoakes * But it would probably just be a waste of time searching, 174749b1b8d6SLorenzo Stoakes * or lead to too many vmas hanging off the same anon_vma. 174849b1b8d6SLorenzo Stoakes * We're trying to allow mprotect remerging later on, 174949b1b8d6SLorenzo Stoakes * not trying to minimize memory used for anon_vmas. 175049b1b8d6SLorenzo Stoakes */ 175149b1b8d6SLorenzo Stoakes return anon_vma; 175249b1b8d6SLorenzo Stoakes } 175349b1b8d6SLorenzo Stoakes 175449b1b8d6SLorenzo Stoakes static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops) 175549b1b8d6SLorenzo Stoakes { 175649b1b8d6SLorenzo Stoakes return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite); 175749b1b8d6SLorenzo Stoakes } 175849b1b8d6SLorenzo Stoakes 175949b1b8d6SLorenzo Stoakes static bool vma_is_shared_writable(struct vm_area_struct *vma) 176049b1b8d6SLorenzo Stoakes { 176149b1b8d6SLorenzo Stoakes return (vma->vm_flags & (VM_WRITE | VM_SHARED)) == 176249b1b8d6SLorenzo Stoakes (VM_WRITE | VM_SHARED); 176349b1b8d6SLorenzo Stoakes } 176449b1b8d6SLorenzo Stoakes 176549b1b8d6SLorenzo Stoakes static bool vma_fs_can_writeback(struct vm_area_struct *vma) 176649b1b8d6SLorenzo Stoakes { 176749b1b8d6SLorenzo Stoakes /* No managed pages to writeback. */ 176849b1b8d6SLorenzo Stoakes if (vma->vm_flags & VM_PFNMAP) 176949b1b8d6SLorenzo Stoakes return false; 177049b1b8d6SLorenzo Stoakes 177149b1b8d6SLorenzo Stoakes return vma->vm_file && vma->vm_file->f_mapping && 177249b1b8d6SLorenzo Stoakes mapping_can_writeback(vma->vm_file->f_mapping); 177349b1b8d6SLorenzo Stoakes } 177449b1b8d6SLorenzo Stoakes 177549b1b8d6SLorenzo Stoakes /* 177649b1b8d6SLorenzo Stoakes * Does this VMA require the underlying folios to have their dirty state 177749b1b8d6SLorenzo Stoakes * tracked? 177849b1b8d6SLorenzo Stoakes */ 177949b1b8d6SLorenzo Stoakes bool vma_needs_dirty_tracking(struct vm_area_struct *vma) 178049b1b8d6SLorenzo Stoakes { 178149b1b8d6SLorenzo Stoakes /* Only shared, writable VMAs require dirty tracking. 
*/ 178249b1b8d6SLorenzo Stoakes if (!vma_is_shared_writable(vma)) 178349b1b8d6SLorenzo Stoakes return false; 178449b1b8d6SLorenzo Stoakes 178549b1b8d6SLorenzo Stoakes /* Does the filesystem need to be notified? */ 178649b1b8d6SLorenzo Stoakes if (vm_ops_needs_writenotify(vma->vm_ops)) 178749b1b8d6SLorenzo Stoakes return true; 178849b1b8d6SLorenzo Stoakes 178949b1b8d6SLorenzo Stoakes /* 179049b1b8d6SLorenzo Stoakes * Even if the filesystem doesn't indicate a need for writenotify, if it 179149b1b8d6SLorenzo Stoakes * can writeback, dirty tracking is still required. 179249b1b8d6SLorenzo Stoakes */ 179349b1b8d6SLorenzo Stoakes return vma_fs_can_writeback(vma); 179449b1b8d6SLorenzo Stoakes } 179549b1b8d6SLorenzo Stoakes 179649b1b8d6SLorenzo Stoakes /* 179749b1b8d6SLorenzo Stoakes * Some shared mappings will want the pages marked read-only 179849b1b8d6SLorenzo Stoakes * to track write events. If so, we'll downgrade vm_page_prot 179949b1b8d6SLorenzo Stoakes * to the private version (using protection_map[] without the 180049b1b8d6SLorenzo Stoakes * VM_SHARED bit). 180149b1b8d6SLorenzo Stoakes */ 180249b1b8d6SLorenzo Stoakes bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) 180349b1b8d6SLorenzo Stoakes { 180449b1b8d6SLorenzo Stoakes /* If it was private or non-writable, the write bit is already clear */ 180549b1b8d6SLorenzo Stoakes if (!vma_is_shared_writable(vma)) 180649b1b8d6SLorenzo Stoakes return false; 180749b1b8d6SLorenzo Stoakes 180849b1b8d6SLorenzo Stoakes /* The backer wishes to know when pages are first written to? */ 180949b1b8d6SLorenzo Stoakes if (vm_ops_needs_writenotify(vma->vm_ops)) 181049b1b8d6SLorenzo Stoakes return true; 181149b1b8d6SLorenzo Stoakes 181249b1b8d6SLorenzo Stoakes /* The open routine did something to the protections that pgprot_modify 181349b1b8d6SLorenzo Stoakes * won't preserve? */ 181449b1b8d6SLorenzo Stoakes if (pgprot_val(vm_page_prot) != 181549b1b8d6SLorenzo Stoakes pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags))) 181649b1b8d6SLorenzo Stoakes return false; 181749b1b8d6SLorenzo Stoakes 181849b1b8d6SLorenzo Stoakes /* 181949b1b8d6SLorenzo Stoakes * Do we need to track softdirty? hugetlb does not support softdirty 182049b1b8d6SLorenzo Stoakes * tracking yet. 182149b1b8d6SLorenzo Stoakes */ 182249b1b8d6SLorenzo Stoakes if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma)) 182349b1b8d6SLorenzo Stoakes return true; 182449b1b8d6SLorenzo Stoakes 182549b1b8d6SLorenzo Stoakes /* Do we need write faults for uffd-wp tracking? */ 182649b1b8d6SLorenzo Stoakes if (userfaultfd_wp(vma)) 182749b1b8d6SLorenzo Stoakes return true; 182849b1b8d6SLorenzo Stoakes 182949b1b8d6SLorenzo Stoakes /* Can the mapping track the dirty pages? */ 183049b1b8d6SLorenzo Stoakes return vma_fs_can_writeback(vma); 183149b1b8d6SLorenzo Stoakes } 183249b1b8d6SLorenzo Stoakes 183349b1b8d6SLorenzo Stoakes static DEFINE_MUTEX(mm_all_locks_mutex); 183449b1b8d6SLorenzo Stoakes 183549b1b8d6SLorenzo Stoakes static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) 183649b1b8d6SLorenzo Stoakes { 183749b1b8d6SLorenzo Stoakes if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { 183849b1b8d6SLorenzo Stoakes /* 183949b1b8d6SLorenzo Stoakes * The LSB of head.next can't change from under us 184049b1b8d6SLorenzo Stoakes * because we hold the mm_all_locks_mutex. 
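 *
 * Descriptive note: the LSB of the anon_vma root's rb_root pointer is used
 * here as an "already locked by mm_take_all_locks()" marker, so a root
 * shared by many VMAs is only taken once; vm_unlock_anon_vma() clears it
 * again on the way out.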
184149b1b8d6SLorenzo Stoakes */ 184249b1b8d6SLorenzo Stoakes down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); 184349b1b8d6SLorenzo Stoakes /* 184449b1b8d6SLorenzo Stoakes * We can safely modify head.next after taking the 184549b1b8d6SLorenzo Stoakes * anon_vma->root->rwsem. If some other vma in this mm shares 184649b1b8d6SLorenzo Stoakes * the same anon_vma we won't take it again. 184749b1b8d6SLorenzo Stoakes * 184849b1b8d6SLorenzo Stoakes * No need of atomic instructions here, head.next 184949b1b8d6SLorenzo Stoakes * can't change from under us thanks to the 185049b1b8d6SLorenzo Stoakes * anon_vma->root->rwsem. 185149b1b8d6SLorenzo Stoakes */ 185249b1b8d6SLorenzo Stoakes if (__test_and_set_bit(0, (unsigned long *) 185349b1b8d6SLorenzo Stoakes &anon_vma->root->rb_root.rb_root.rb_node)) 185449b1b8d6SLorenzo Stoakes BUG(); 185549b1b8d6SLorenzo Stoakes } 185649b1b8d6SLorenzo Stoakes } 185749b1b8d6SLorenzo Stoakes 185849b1b8d6SLorenzo Stoakes static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) 185949b1b8d6SLorenzo Stoakes { 186049b1b8d6SLorenzo Stoakes if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { 186149b1b8d6SLorenzo Stoakes /* 186249b1b8d6SLorenzo Stoakes * AS_MM_ALL_LOCKS can't change from under us because 186349b1b8d6SLorenzo Stoakes * we hold the mm_all_locks_mutex. 186449b1b8d6SLorenzo Stoakes * 186549b1b8d6SLorenzo Stoakes * Operations on ->flags have to be atomic because 186649b1b8d6SLorenzo Stoakes * even if AS_MM_ALL_LOCKS is stable thanks to the 186749b1b8d6SLorenzo Stoakes * mm_all_locks_mutex, there may be other cpus 186849b1b8d6SLorenzo Stoakes * changing other bitflags in parallel to us. 186949b1b8d6SLorenzo Stoakes */ 187049b1b8d6SLorenzo Stoakes if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) 187149b1b8d6SLorenzo Stoakes BUG(); 187249b1b8d6SLorenzo Stoakes down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); 187349b1b8d6SLorenzo Stoakes } 187449b1b8d6SLorenzo Stoakes } 187549b1b8d6SLorenzo Stoakes 187649b1b8d6SLorenzo Stoakes /* 187749b1b8d6SLorenzo Stoakes * This operation locks against the VM for all pte/vma/mm related 187849b1b8d6SLorenzo Stoakes * operations that could ever happen on a certain mm. This includes 187949b1b8d6SLorenzo Stoakes * vmtruncate, try_to_unmap, and all page faults. 188049b1b8d6SLorenzo Stoakes * 188149b1b8d6SLorenzo Stoakes * The caller must take the mmap_lock in write mode before calling 188249b1b8d6SLorenzo Stoakes * mm_take_all_locks(). The caller isn't allowed to release the 188349b1b8d6SLorenzo Stoakes * mmap_lock until mm_drop_all_locks() returns. 188449b1b8d6SLorenzo Stoakes * 188549b1b8d6SLorenzo Stoakes * mmap_lock in write mode is required in order to block all operations 188649b1b8d6SLorenzo Stoakes * that could modify pagetables and free pages without need of 188749b1b8d6SLorenzo Stoakes * altering the vma layout. It's also needed in write mode to avoid new 188849b1b8d6SLorenzo Stoakes * anon_vmas to be associated with existing vmas. 188949b1b8d6SLorenzo Stoakes * 189049b1b8d6SLorenzo Stoakes * A single task can't take more than one mm_take_all_locks() in a row 189149b1b8d6SLorenzo Stoakes * or it would deadlock. 189249b1b8d6SLorenzo Stoakes * 189349b1b8d6SLorenzo Stoakes * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in 189449b1b8d6SLorenzo Stoakes * mapping->flags avoid to take the same lock twice, if more than one 189549b1b8d6SLorenzo Stoakes * vma in this mm is backed by the same anon_vma or address_space. 
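 *
 * An illustrative caller sketch (not part of this file; error handling
 * trimmed), reflecting the rules above:
 *
 *	mmap_write_lock(mm);
 *	if (mm_take_all_locks(mm))
 *		goto out;	// -EINTR, a signal arrived
 *	// ... operate on every VMA, anon_vma and address_space safely ...
 *	mm_drop_all_locks(mm);
 * out:
 *	mmap_write_unlock(mm);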
189649b1b8d6SLorenzo Stoakes  * 189749b1b8d6SLorenzo Stoakes  * We take locks in the following order, according to the comment at the beginning 189849b1b8d6SLorenzo Stoakes  * of mm/rmap.c: 189949b1b8d6SLorenzo Stoakes  * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for 190049b1b8d6SLorenzo Stoakes  * hugetlb mapping); 190149b1b8d6SLorenzo Stoakes  * - all vmas marked locked 190249b1b8d6SLorenzo Stoakes  * - all i_mmap_rwsem locks; 190349b1b8d6SLorenzo Stoakes  * - all anon_vma->rwsem locks 190449b1b8d6SLorenzo Stoakes  * 190549b1b8d6SLorenzo Stoakes  * We can take all locks within these types randomly because the VM code 190649b1b8d6SLorenzo Stoakes  * doesn't nest them and we are protected from parallel mm_take_all_locks() by 190749b1b8d6SLorenzo Stoakes  * mm_all_locks_mutex. 190849b1b8d6SLorenzo Stoakes  * 190949b1b8d6SLorenzo Stoakes  * mm_take_all_locks() and mm_drop_all_locks() are expensive operations 191049b1b8d6SLorenzo Stoakes  * that may have to take thousands of locks. 191149b1b8d6SLorenzo Stoakes  * 191249b1b8d6SLorenzo Stoakes  * mm_take_all_locks() can fail if it's interrupted by signals. 191349b1b8d6SLorenzo Stoakes  */ 191449b1b8d6SLorenzo Stoakes int mm_take_all_locks(struct mm_struct *mm) 191549b1b8d6SLorenzo Stoakes { 191649b1b8d6SLorenzo Stoakes struct vm_area_struct *vma; 191749b1b8d6SLorenzo Stoakes struct anon_vma_chain *avc; 191849b1b8d6SLorenzo Stoakes VMA_ITERATOR(vmi, mm, 0); 191949b1b8d6SLorenzo Stoakes  192049b1b8d6SLorenzo Stoakes mmap_assert_write_locked(mm); 192149b1b8d6SLorenzo Stoakes  192249b1b8d6SLorenzo Stoakes mutex_lock(&mm_all_locks_mutex); 192349b1b8d6SLorenzo Stoakes  192449b1b8d6SLorenzo Stoakes /* 192549b1b8d6SLorenzo Stoakes  * vma_start_write() does not have a complement in mm_drop_all_locks() 192649b1b8d6SLorenzo Stoakes  * because vma_start_write() is always asymmetrical; it marks a VMA as 192749b1b8d6SLorenzo Stoakes  * being written to until mmap_write_unlock() or mmap_write_downgrade() 192849b1b8d6SLorenzo Stoakes  * is reached.
192949b1b8d6SLorenzo Stoakes */ 193049b1b8d6SLorenzo Stoakes for_each_vma(vmi, vma) { 193149b1b8d6SLorenzo Stoakes if (signal_pending(current)) 193249b1b8d6SLorenzo Stoakes goto out_unlock; 193349b1b8d6SLorenzo Stoakes vma_start_write(vma); 193449b1b8d6SLorenzo Stoakes } 193549b1b8d6SLorenzo Stoakes 193649b1b8d6SLorenzo Stoakes vma_iter_init(&vmi, mm, 0); 193749b1b8d6SLorenzo Stoakes for_each_vma(vmi, vma) { 193849b1b8d6SLorenzo Stoakes if (signal_pending(current)) 193949b1b8d6SLorenzo Stoakes goto out_unlock; 194049b1b8d6SLorenzo Stoakes if (vma->vm_file && vma->vm_file->f_mapping && 194149b1b8d6SLorenzo Stoakes is_vm_hugetlb_page(vma)) 194249b1b8d6SLorenzo Stoakes vm_lock_mapping(mm, vma->vm_file->f_mapping); 194349b1b8d6SLorenzo Stoakes } 194449b1b8d6SLorenzo Stoakes 194549b1b8d6SLorenzo Stoakes vma_iter_init(&vmi, mm, 0); 194649b1b8d6SLorenzo Stoakes for_each_vma(vmi, vma) { 194749b1b8d6SLorenzo Stoakes if (signal_pending(current)) 194849b1b8d6SLorenzo Stoakes goto out_unlock; 194949b1b8d6SLorenzo Stoakes if (vma->vm_file && vma->vm_file->f_mapping && 195049b1b8d6SLorenzo Stoakes !is_vm_hugetlb_page(vma)) 195149b1b8d6SLorenzo Stoakes vm_lock_mapping(mm, vma->vm_file->f_mapping); 195249b1b8d6SLorenzo Stoakes } 195349b1b8d6SLorenzo Stoakes 195449b1b8d6SLorenzo Stoakes vma_iter_init(&vmi, mm, 0); 195549b1b8d6SLorenzo Stoakes for_each_vma(vmi, vma) { 195649b1b8d6SLorenzo Stoakes if (signal_pending(current)) 195749b1b8d6SLorenzo Stoakes goto out_unlock; 195849b1b8d6SLorenzo Stoakes if (vma->anon_vma) 195949b1b8d6SLorenzo Stoakes list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 196049b1b8d6SLorenzo Stoakes vm_lock_anon_vma(mm, avc->anon_vma); 196149b1b8d6SLorenzo Stoakes } 196249b1b8d6SLorenzo Stoakes 196349b1b8d6SLorenzo Stoakes return 0; 196449b1b8d6SLorenzo Stoakes 196549b1b8d6SLorenzo Stoakes out_unlock: 196649b1b8d6SLorenzo Stoakes mm_drop_all_locks(mm); 196749b1b8d6SLorenzo Stoakes return -EINTR; 196849b1b8d6SLorenzo Stoakes } 196949b1b8d6SLorenzo Stoakes 197049b1b8d6SLorenzo Stoakes static void vm_unlock_anon_vma(struct anon_vma *anon_vma) 197149b1b8d6SLorenzo Stoakes { 197249b1b8d6SLorenzo Stoakes if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { 197349b1b8d6SLorenzo Stoakes /* 197449b1b8d6SLorenzo Stoakes * The LSB of head.next can't change to 0 from under 197549b1b8d6SLorenzo Stoakes * us because we hold the mm_all_locks_mutex. 197649b1b8d6SLorenzo Stoakes * 197749b1b8d6SLorenzo Stoakes * We must however clear the bitflag before unlocking 197849b1b8d6SLorenzo Stoakes * the vma so the users using the anon_vma->rb_root will 197949b1b8d6SLorenzo Stoakes * never see our bitflag. 198049b1b8d6SLorenzo Stoakes * 198149b1b8d6SLorenzo Stoakes * No need of atomic instructions here, head.next 198249b1b8d6SLorenzo Stoakes * can't change from under us until we release the 198349b1b8d6SLorenzo Stoakes * anon_vma->root->rwsem. 
198449b1b8d6SLorenzo Stoakes */ 198549b1b8d6SLorenzo Stoakes if (!__test_and_clear_bit(0, (unsigned long *) 198649b1b8d6SLorenzo Stoakes &anon_vma->root->rb_root.rb_root.rb_node)) 198749b1b8d6SLorenzo Stoakes BUG(); 198849b1b8d6SLorenzo Stoakes anon_vma_unlock_write(anon_vma); 198949b1b8d6SLorenzo Stoakes } 199049b1b8d6SLorenzo Stoakes } 199149b1b8d6SLorenzo Stoakes 199249b1b8d6SLorenzo Stoakes static void vm_unlock_mapping(struct address_space *mapping) 199349b1b8d6SLorenzo Stoakes { 199449b1b8d6SLorenzo Stoakes if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { 199549b1b8d6SLorenzo Stoakes /* 199649b1b8d6SLorenzo Stoakes * AS_MM_ALL_LOCKS can't change to 0 from under us 199749b1b8d6SLorenzo Stoakes * because we hold the mm_all_locks_mutex. 199849b1b8d6SLorenzo Stoakes */ 199949b1b8d6SLorenzo Stoakes i_mmap_unlock_write(mapping); 200049b1b8d6SLorenzo Stoakes if (!test_and_clear_bit(AS_MM_ALL_LOCKS, 200149b1b8d6SLorenzo Stoakes &mapping->flags)) 200249b1b8d6SLorenzo Stoakes BUG(); 200349b1b8d6SLorenzo Stoakes } 200449b1b8d6SLorenzo Stoakes } 200549b1b8d6SLorenzo Stoakes 200649b1b8d6SLorenzo Stoakes /* 200749b1b8d6SLorenzo Stoakes * The mmap_lock cannot be released by the caller until 200849b1b8d6SLorenzo Stoakes * mm_drop_all_locks() returns. 200949b1b8d6SLorenzo Stoakes */ 201049b1b8d6SLorenzo Stoakes void mm_drop_all_locks(struct mm_struct *mm) 201149b1b8d6SLorenzo Stoakes { 201249b1b8d6SLorenzo Stoakes struct vm_area_struct *vma; 201349b1b8d6SLorenzo Stoakes struct anon_vma_chain *avc; 201449b1b8d6SLorenzo Stoakes VMA_ITERATOR(vmi, mm, 0); 201549b1b8d6SLorenzo Stoakes 201649b1b8d6SLorenzo Stoakes mmap_assert_write_locked(mm); 201749b1b8d6SLorenzo Stoakes BUG_ON(!mutex_is_locked(&mm_all_locks_mutex)); 201849b1b8d6SLorenzo Stoakes 201949b1b8d6SLorenzo Stoakes for_each_vma(vmi, vma) { 202049b1b8d6SLorenzo Stoakes if (vma->anon_vma) 202149b1b8d6SLorenzo Stoakes list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 202249b1b8d6SLorenzo Stoakes vm_unlock_anon_vma(avc->anon_vma); 202349b1b8d6SLorenzo Stoakes if (vma->vm_file && vma->vm_file->f_mapping) 202449b1b8d6SLorenzo Stoakes vm_unlock_mapping(vma->vm_file->f_mapping); 202549b1b8d6SLorenzo Stoakes } 202649b1b8d6SLorenzo Stoakes 202749b1b8d6SLorenzo Stoakes mutex_unlock(&mm_all_locks_mutex); 202849b1b8d6SLorenzo Stoakes } 2029