Lines Matching +full:delta +full:- +full:y +full:- +full:threshold

1 // SPDX-License-Identifier: GPL-2.0
48 * delta to account for hugetlb alignment).
51 /* User-provided state. */
52 unsigned long addr; /* User-specified address from which we remap. */
55 const unsigned long flags; /* user-specified MREMAP_* flags. */
67 unsigned long delta; /* Absolute delta of old_len and new_len. */ member
70 bool mmap_locked; /* Is mm currently write-locked? */
145 if (vma->vm_file) in take_rmap_locks()
146 i_mmap_lock_write(vma->vm_file->f_mapping); in take_rmap_locks()
147 if (vma->anon_vma) in take_rmap_locks()
148 anon_vma_lock_write(vma->anon_vma); in take_rmap_locks()
153 if (vma->anon_vma) in drop_rmap_locks()
154 anon_vma_unlock_write(vma->anon_vma); in drop_rmap_locks()
155 if (vma->vm_file) in drop_rmap_locks()
156 i_mmap_unlock_write(vma->vm_file->f_mapping); in drop_rmap_locks()
196 struct vm_area_struct *vma = pmc->old; in move_ptes()
198 struct mm_struct *mm = vma->vm_mm; in move_ptes()
204 unsigned long old_addr = pmc->old_addr; in move_ptes()
205 unsigned long new_addr = pmc->new_addr; in move_ptes()
207 unsigned long len = old_end - old_addr; in move_ptes()
221 * - During exec() shift_arg_pages(), we use a specially tagged vma in move_ptes()
224 * - During mremap(), new_vma is often known to be placed after vma in move_ptes()
230 if (pmc->need_rmap_locks) in move_ptes()
239 err = -EAGAIN; in move_ptes()
244 * this by traversing file->f_mapping, so there is no concurrency with in move_ptes()
253 err = -EAGAIN; in move_ptes()
258 flush_tlb_batched_pending(vma->vm_mm); in move_ptes()
266 max_nr_ptes = (old_end - old_addr) >> PAGE_SHIFT; in move_ptes()
306 flush_tlb_range(vma, old_end - len, old_end); in move_ptes()
309 pte_unmap(new_ptep - 1); in move_ptes()
310 pte_unmap_unlock(old_ptep - 1, old_ptl); in move_ptes()
312 if (pmc->need_rmap_locks) in move_ptes()
329 * If we are moving a VMA that has uffd-wp registered but with in uffd_supports_page_table_move()
331 * need to ensure that the uffd-wp state is cleared from all pgtables. in uffd_supports_page_table_move()
336 * "old"-but-actually-"originally new" VMA during recovery will not have in uffd_supports_page_table_move()
339 * run into already-existing page tables. So check both VMAs. in uffd_supports_page_table_move()
341 return !vma_has_uffd_without_event_remap(pmc->old) && in uffd_supports_page_table_move()
342 !vma_has_uffd_without_event_remap(pmc->new); in uffd_supports_page_table_move()
350 struct vm_area_struct *vma = pmc->old; in move_normal_pmd()
351 struct mm_struct *mm = vma->vm_mm; in move_normal_pmd()
367 * If everything is PMD-aligned, that works fine, as moving in move_normal_pmd()
369 * have a few 4kB-only pages that get moved down, and then in move_normal_pmd()
370 * hit the "now the rest is PMD-aligned, let's do everything in move_normal_pmd()
375 * Warn on it once - because we really should try to figure in move_normal_pmd()
376 * out how to do this better - but then say "I won't move in move_normal_pmd()
406 flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PMD_SIZE); in move_normal_pmd()
427 struct vm_area_struct *vma = pmc->old; in move_normal_pud()
428 struct mm_struct *mm = vma->vm_mm; in move_normal_pud()
458 flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PUD_SIZE); in move_normal_pud()
478 struct vm_area_struct *vma = pmc->old; in move_huge_pud()
479 struct mm_struct *mm = vma->vm_mm; in move_huge_pud()
506 set_pud_at(mm, pmc->new_addr, new_pud, pud); in move_huge_pud()
507 flush_pud_tlb_range(vma, pmc->old_addr, pmc->old_addr + HPAGE_PUD_SIZE); in move_huge_pud()
541 unsigned long old_addr = pmc->old_addr; in get_extent()
542 unsigned long old_end = pmc->old_end; in get_extent()
543 unsigned long new_addr = pmc->new_addr; in get_extent()
563 extent = next - old_addr; in get_extent()
564 if (extent > old_end - old_addr) in get_extent()
565 extent = old_end - old_addr; in get_extent()
567 if (extent > next - new_addr) in get_extent()
568 extent = next - new_addr; in get_extent()
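Editor's note: the three clamps above bound how far a single step may advance: to the next page-table boundary after the old address, to the end of the range being moved, and to the next boundary after the new address. A simplified sketch of that clamping, assuming PMD granularity (the helper name is illustrative, not from the file):

static unsigned long pmd_extent_sketch(unsigned long old_addr,
				       unsigned long old_end,
				       unsigned long new_addr)
{
	unsigned long next, extent;

	next = (old_addr + PMD_SIZE) & PMD_MASK; /* next PMD boundary after old_addr */
	extent = next - old_addr;
	if (extent > old_end - old_addr)         /* don't run past the range being moved */
		extent = old_end - old_addr;
	next = (new_addr + PMD_SIZE) & PMD_MASK; /* also respect the destination boundary */
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}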
584 return pmc->need_rmap_locks; in should_take_rmap_locks()
600 take_rmap_locks(pmc->old); in move_pgt_entry()
611 move_huge_pmd(pmc->old, pmc->old_addr, pmc->new_addr, old_entry, in move_pgt_entry()
625 drop_rmap_locks(pmc->old); in move_pgt_entry()
647 if (!pmc->for_stack && vma->vm_start != addr_to_align) in can_align_down()
650 /* In the stack case we explicitly permit in-VMA alignment. */ in can_align_down()
651 if (pmc->for_stack && addr_masked >= vma->vm_start) in can_align_down()
658 return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL; in can_align_down()
669 unsigned long old_align = pmc->old_addr & align_mask; in can_realign_addr()
670 unsigned long new_align = pmc->new_addr & align_mask; in can_realign_addr()
672 unsigned long old_align_next = pagetable_size - old_align; in can_realign_addr()
683 * .<- old_align -> . in can_realign_addr()
684 * . |----------------.-----------| in can_realign_addr()
686 * . |----------------.-----------| in can_realign_addr()
687 * . <----------------.-----------> in can_realign_addr()
689 * <-------------------------------> in can_realign_addr()
691 * . <----------------> in can_realign_addr()
694 if (pmc->len_in < old_align_next) in can_realign_addr()
706 if (!can_align_down(pmc, pmc->old, pmc->old_addr, pagetable_mask) || in can_realign_addr()
707 !can_align_down(pmc, pmc->new, pmc->new_addr, pagetable_mask)) in can_realign_addr()
721 * . |----------------.-----------|
723 * . |----------------.-----------|
724 * . pmc->old_addr . pmc->old_end
725 * . <---------------------------->
732 * The idea here is simply to align pmc->old_addr, pmc->new_addr down to the
737 * . |----------------.-----------|
739 * . |----------------.-----------|
740 * pmc->old_addr . pmc->old_end
741 * <------------------------------------------->
753 * pmc->old_end value, and since the move_page_tables() operation spans in try_realign_addr()
757 pmc->old_addr &= pagetable_mask; in try_realign_addr()
758 pmc->new_addr &= pagetable_mask; in try_realign_addr()
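Editor's note: once the checks above pass, realignment is just masking both cursors down to the same page-table boundary. A worked example, assuming 2 MiB PMDs on x86-64 and mutually aligned addresses (the values are illustrative):

/*
 * Worked example (editor's illustration), PMD_SIZE == 2 MiB,
 * PMD_MASK == ~(PMD_SIZE - 1) == 0xffffffffffe00000:
 *
 *   pmc->old_addr = 0x7f1234567000   (offset 0x167000 into its PMD)
 *   pmc->new_addr = 0x7f9876567000   (same 0x167000 offset, so mutually aligned)
 *
 *   old_addr &= PMD_MASK  ->  0x7f1234400000
 *   new_addr &= PMD_MASK  ->  0x7f9876400000
 *
 * The move then starts on a PMD boundary, so whole page tables can be moved
 * instead of individual PTEs for the leading sub-PMD portion.
 */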
764 return pmc->old_addr >= pmc->old_end; in pmc_done()
770 pmc->old_addr += extent; in pmc_next()
771 pmc->new_addr += extent; in pmc_next()
780 unsigned long orig_old_addr = pmc->old_end - pmc->len_in; in pmc_progress()
781 unsigned long old_addr = pmc->old_addr; in pmc_progress()
788 return old_addr < orig_old_addr ? 0 : old_addr - orig_old_addr; in pmc_progress()
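Editor's note: together these helpers drive the caller's copy loop: advance both cursors by the extent just handled, stop once the old cursor reaches old_end, and report bytes moved even after a partial failure. A sketch of that loop shape, assuming a hypothetical per-chunk helper (move_one_chunk() is not a real function; the real loop picks PUD/PMD/PTE strategies per iteration):

static unsigned long move_tables_sketch(struct pagetable_move_control *pmc)
{
	unsigned long extent = 0;

	for (; !pmc_done(pmc); pmc_next(pmc, extent)) {
		extent = get_extent(NORMAL_PMD, pmc); /* clamp to PMD boundaries */
		if (!move_one_chunk(pmc, extent))     /* hypothetical helper */
			break;                        /* partial progress is still reported */
	}
	return pmc_progress(pmc);                     /* bytes moved from the original start */
}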
797 struct mm_struct *mm = pmc->old->vm_mm; in move_page_tables()
799 if (!pmc->len_in) in move_page_tables()
802 if (is_vm_hugetlb_page(pmc->old)) in move_page_tables()
803 return move_hugetlb_page_tables(pmc->old, pmc->new, pmc->old_addr, in move_page_tables()
804 pmc->new_addr, pmc->len_in); in move_page_tables()
812 flush_cache_range(pmc->old, pmc->old_addr, pmc->old_end); in move_page_tables()
814 pmc->old_addr, pmc->old_end); in move_page_tables()
820 * If extent is PUD-sized try to speed up the move by moving at the in move_page_tables()
825 old_pud = get_old_pud(mm, pmc->old_addr); in move_page_tables()
828 new_pud = alloc_new_pud(mm, pmc->new_addr); in move_page_tables()
843 old_pmd = get_old_pmd(mm, pmc->old_addr); in move_page_tables()
846 new_pmd = alloc_new_pmd(mm, pmc->new_addr); in move_page_tables()
854 split_huge_pmd(pmc->old, old_pmd, pmc->old_addr); in move_page_tables()
858 * If the extent is PMD-sized, try to speed the move by in move_page_tables()
866 if (pte_alloc(pmc->new->vm_mm, new_pmd)) in move_page_tables()
877 /* Set vrm->delta to the difference in VMA size specified by the user. */
880 vrm->delta = abs_diff(vrm->old_len, vrm->new_len); in vrm_set_delta()
883 /* Determine what kind of remap this is - shrink, expand or no resize at all. */
886 if (vrm->delta == 0) in vrm_remap_type()
889 if (vrm->old_len > vrm->new_len) in vrm_remap_type()
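Editor's note: the remap request is classified purely from the two lengths: the delta is their absolute difference, and the comparison picks shrink, expand, or no resize. A hedged worked example (the no-resize enumerator name below is assumed; it does not appear in these matched lines):

/* Worked example: old_len = 6 MiB, new_len = 4 MiB => delta = 2 MiB. */
vrm->delta = abs_diff(vrm->old_len, vrm->new_len);

if (vrm->delta == 0)
	return MREMAP_NO_RESIZE;   /* assumed name: same size, no resize at all */
if (vrm->old_len > vrm->new_len)
	return MREMAP_SHRINK;      /* 6 MiB > 4 MiB: this request shrinks */
return MREMAP_EXPAND;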
896 * When moving a VMA to vrm->new_addr, does this result in the new and old VMAs
901 unsigned long start_old = vrm->addr; in vrm_overlaps()
902 unsigned long start_new = vrm->new_addr; in vrm_overlaps()
903 unsigned long end_old = vrm->addr + vrm->old_len; in vrm_overlaps()
904 unsigned long end_new = vrm->new_addr + vrm->new_len; in vrm_overlaps()
908 * |-----------| in vrm_overlaps()
910 * |-----------| in vrm_overlaps()
911 * |-------------| in vrm_overlaps()
913 * |-------------| in vrm_overlaps()
929 return vrm->flags & (MREMAP_FIXED | MREMAP_DONTUNMAP); in vrm_implies_new_addr()
933 * Find an unmapped area for the requested vrm->new_addr.
939 * Returns 0 on success (with vrm->new_addr updated), or an error code upon
944 struct vm_area_struct *vma = vrm->vma; in vrm_set_new_addr()
947 pgoff_t internal_pgoff = (vrm->addr - vma->vm_start) >> PAGE_SHIFT; in vrm_set_new_addr()
948 pgoff_t pgoff = vma->vm_pgoff + internal_pgoff; in vrm_set_new_addr()
949 unsigned long new_addr = vrm_implies_new_addr(vrm) ? vrm->new_addr : 0; in vrm_set_new_addr()
952 if (vrm->flags & MREMAP_FIXED) in vrm_set_new_addr()
954 if (vma->vm_flags & VM_MAYSHARE) in vrm_set_new_addr()
957 res = get_unmapped_area(vma->vm_file, new_addr, vrm->new_len, pgoff, in vrm_set_new_addr()
962 vrm->new_addr = res; in vrm_set_new_addr()
976 if (!(vrm->vma->vm_flags & VM_ACCOUNT)) in vrm_calc_charge()
981 * the length of the new one. Otherwise it's just the delta in size. in vrm_calc_charge()
983 if (vrm->flags & MREMAP_DONTUNMAP) in vrm_calc_charge()
984 charged = vrm->new_len >> PAGE_SHIFT; in vrm_calc_charge()
986 charged = vrm->delta >> PAGE_SHIFT; in vrm_calc_charge()
990 if (security_vm_enough_memory_mm(current->mm, charged)) in vrm_calc_charge()
993 vrm->charged = charged; in vrm_calc_charge()
998 * an error has occurred so we will not be using vrm->charged memory. Unaccount
1003 if (!(vrm->vma->vm_flags & VM_ACCOUNT)) in vrm_uncharge()
1006 vm_unacct_memory(vrm->charged); in vrm_uncharge()
1007 vrm->charged = 0; in vrm_uncharge()
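Editor's note: the charge is counted in pages: the full new length when MREMAP_DONTUNMAP keeps the old mapping mapped, otherwise only the growth. A worked example with 4 KiB pages (numbers are illustrative):

/*
 * Worked example, 4 KiB pages (PAGE_SHIFT == 12):
 *   old_len = 2 MiB, new_len = 3 MiB  =>  delta = 1 MiB
 *
 *   plain expand:           charged = delta   >> PAGE_SHIFT = 256 pages
 *   with MREMAP_DONTUNMAP:  charged = new_len >> PAGE_SHIFT = 768 pages
 *     (the old 2 MiB mapping stays in place, so the whole new VMA is new memory)
 */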
1019 struct mm_struct *mm = current->mm; in vrm_stat_account()
1020 struct vm_area_struct *vma = vrm->vma; in vrm_stat_account()
1022 vm_stat_account(mm, vma->vm_flags, pages); in vrm_stat_account()
1023 if (vma->vm_flags & VM_LOCKED) in vrm_stat_account()
1024 mm->locked_vm += pages; in vrm_stat_account()
1034 struct vm_area_struct *vma = vrm->vma; in prep_move_vma()
1035 unsigned long old_addr = vrm->addr; in prep_move_vma()
1036 unsigned long old_len = vrm->old_len; in prep_move_vma()
1037 vm_flags_t dummy = vma->vm_flags; in prep_move_vma()
1043 if (current->mm->map_count >= sysctl_max_map_count - 3) in prep_move_vma()
1044 return -ENOMEM; in prep_move_vma()
1046 if (vma->vm_ops && vma->vm_ops->may_split) { in prep_move_vma()
1047 if (vma->vm_start != old_addr) in prep_move_vma()
1048 err = vma->vm_ops->may_split(vma, old_addr); in prep_move_vma()
1049 if (!err && vma->vm_end != old_addr + old_len) in prep_move_vma()
1050 err = vma->vm_ops->may_split(vma, old_addr + old_len); in prep_move_vma()
1059 * pages recently unmapped. But leave vma->vm_flags as it was, in prep_move_vma()
1080 struct mm_struct *mm = current->mm; in unmap_source_vma()
1081 unsigned long addr = vrm->addr; in unmap_source_vma()
1082 unsigned long len = vrm->old_len; in unmap_source_vma()
1083 struct vm_area_struct *vma = vrm->vma; in unmap_source_vma()
1097 bool accountable_move = (vma->vm_flags & VM_ACCOUNT) && in unmap_source_vma()
1098 !(vrm->flags & MREMAP_DONTUNMAP); in unmap_source_vma()
1118 vm_start = vma->vm_start; in unmap_source_vma()
1119 vm_end = vma->vm_end; in unmap_source_vma()
1122 err = do_vmi_munmap(&vmi, mm, addr, len, vrm->uf_unmap, /* unlock= */false); in unmap_source_vma()
1123 vrm->vma = NULL; /* Invalidated. */ in unmap_source_vma()
1124 vrm->vmi_needs_invalidate = true; in unmap_source_vma()
1137 * |-------------| in unmap_source_vma()
1139 * |-------------| in unmap_source_vma()
1147 * |---| |---| in unmap_source_vma()
1149 * |---| |---| in unmap_source_vma()
1174 * Copy vrm->vma over to vrm->new_addr possibly adjusting size as part of the
1184 unsigned long internal_offset = vrm->addr - vrm->vma->vm_start; in copy_vma_and_data()
1186 unsigned long new_pgoff = vrm->vma->vm_pgoff + internal_pgoff; in copy_vma_and_data()
1188 struct vm_area_struct *vma = vrm->vma; in copy_vma_and_data()
1191 PAGETABLE_MOVE(pmc, NULL, NULL, vrm->addr, vrm->new_addr, vrm->old_len); in copy_vma_and_data()
1193 new_vma = copy_vma(&vma, vrm->new_addr, vrm->new_len, new_pgoff, in copy_vma_and_data()
1198 return -ENOMEM; in copy_vma_and_data()
1201 if (vma != vrm->vma) in copy_vma_and_data()
1202 vrm->vmi_needs_invalidate = true; in copy_vma_and_data()
1204 vrm->vma = vma; in copy_vma_and_data()
1209 if (moved_len < vrm->old_len) in copy_vma_and_data()
1210 err = -ENOMEM; in copy_vma_and_data()
1211 else if (vma->vm_ops && vma->vm_ops->mremap) in copy_vma_and_data()
1212 err = vma->vm_ops->mremap(new_vma); in copy_vma_and_data()
1215 PAGETABLE_MOVE(pmc_revert, new_vma, vma, vrm->new_addr, in copy_vma_and_data()
1216 vrm->addr, moved_len); in copy_vma_and_data()
1226 vrm->vma = new_vma; in copy_vma_and_data()
1227 vrm->old_len = vrm->new_len; in copy_vma_and_data()
1228 vrm->addr = vrm->new_addr; in copy_vma_and_data()
1230 mremap_userfaultfd_prep(new_vma, vrm->uf); in copy_vma_and_data()
1248 unsigned long start = vrm->addr; in dontunmap_complete()
1249 unsigned long end = vrm->addr + vrm->old_len; in dontunmap_complete()
1250 unsigned long old_start = vrm->vma->vm_start; in dontunmap_complete()
1251 unsigned long old_end = vrm->vma->vm_end; in dontunmap_complete()
1254 vm_flags_clear(vrm->vma, VM_LOCKED_MASK); in dontunmap_complete()
1260 if (new_vma != vrm->vma && start == old_start && end == old_end) in dontunmap_complete()
1261 unlink_anon_vmas(vrm->vma); in dontunmap_complete()
1268 struct mm_struct *mm = current->mm; in move_vma()
1282 return -ENOMEM; in move_vma()
1285 vma_start_write(vrm->vma); in move_vma()
1290 * If we established the copied-to VMA, we attempt to recover from the in move_vma()
1306 hiwater_vm = mm->hiwater_vm; in move_vma()
1308 vrm_stat_account(vrm, vrm->new_len); in move_vma()
1309 if (unlikely(!err && (vrm->flags & MREMAP_DONTUNMAP))) in move_vma()
1314 mm->hiwater_vm = hiwater_vm; in move_vma()
1316 return err ? (unsigned long)err : vrm->new_addr; in move_vma()
1324 * then load the correct VMA into vrm->vma afterwards.
1329 struct mm_struct *mm = current->mm; in shrink_vma()
1330 unsigned long unmap_start = vrm->addr + vrm->new_len; in shrink_vma()
1331 unsigned long unmap_bytes = vrm->delta; in shrink_vma()
1335 VM_BUG_ON(vrm->remap_type != MREMAP_SHRINK); in shrink_vma()
1338 vrm->uf_unmap, drop_lock); in shrink_vma()
1339 vrm->vma = NULL; /* Invalidated. */ in shrink_vma()
1349 vrm->mmap_locked = false; in shrink_vma()
1351 vrm->vma = vma_lookup(mm, vrm->addr); in shrink_vma()
1352 if (!vrm->vma) in shrink_vma()
1353 return -EFAULT; in shrink_vma()
1360 * mremap_to() - remap a vma to a new location.
1365 struct mm_struct *mm = current->mm; in mremap_to()
1368 if (vrm->flags & MREMAP_FIXED) { in mremap_to()
1374 err = do_munmap(mm, vrm->new_addr, vrm->new_len, in mremap_to()
1375 vrm->uf_unmap_early); in mremap_to()
1376 vrm->vma = NULL; /* Invalidated. */ in mremap_to()
1377 vrm->vmi_needs_invalidate = true; in mremap_to()
1385 vrm->vma = vma_lookup(mm, vrm->addr); in mremap_to()
1386 if (!vrm->vma) in mremap_to()
1387 return -EFAULT; in mremap_to()
1390 if (vrm->remap_type == MREMAP_SHRINK) { in mremap_to()
1396 vrm->old_len = vrm->new_len; in mremap_to()
1400 if (vrm->flags & MREMAP_DONTUNMAP) { in mremap_to()
1401 vm_flags_t vm_flags = vrm->vma->vm_flags; in mremap_to()
1402 unsigned long pages = vrm->old_len >> PAGE_SHIFT; in mremap_to()
1405 return -ENOMEM; in mremap_to()
1415 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) in vma_expandable() argument
1417 unsigned long end = vma->vm_end + delta; in vma_expandable()
1419 if (end < vma->vm_end) /* overflow */ in vma_expandable()
1421 if (find_vma_intersection(vma->vm_mm, vma->vm_end, end)) in vma_expandable()
1423 if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start, in vma_expandable()
1429 /* Determine whether we are actually able to execute an in-place expansion. */
1432 /* Number of bytes from vrm->addr to end of VMA. */ in vrm_can_expand_in_place()
1433 unsigned long suffix_bytes = vrm->vma->vm_end - vrm->addr; in vrm_can_expand_in_place()
1435 /* If end of range aligns to end of VMA, we can just expand in-place. */ in vrm_can_expand_in_place()
1436 if (suffix_bytes != vrm->old_len) in vrm_can_expand_in_place()
1440 if (!vma_expandable(vrm->vma, vrm->delta)) in vrm_can_expand_in_place()
1447 * We know we can expand the VMA in-place by delta pages, so do so.
1454 struct mm_struct *mm = current->mm; in expand_vma_in_place()
1455 struct vm_area_struct *vma = vrm->vma; in expand_vma_in_place()
1456 VMA_ITERATOR(vmi, mm, vma->vm_end); in expand_vma_in_place()
1459 return -ENOMEM; in expand_vma_in_place()
1470 vma = vma_merge_extend(&vmi, vma, vrm->delta); in expand_vma_in_place()
1473 return -ENOMEM; in expand_vma_in_place()
1475 vrm->vma = vma; in expand_vma_in_place()
1477 vrm_stat_account(vrm, vrm->delta); in expand_vma_in_place()
1484 struct hstate *h __maybe_unused = hstate_vma(vrm->vma); in align_hugetlb()
1486 vrm->old_len = ALIGN(vrm->old_len, huge_page_size(h)); in align_hugetlb()
1487 vrm->new_len = ALIGN(vrm->new_len, huge_page_size(h)); in align_hugetlb()
1490 if (vrm->addr & ~huge_page_mask(h)) in align_hugetlb()
1492 if (vrm->new_addr & ~huge_page_mask(h)) in align_hugetlb()
1499 if (vrm->new_len > vrm->old_len) in align_hugetlb()
1509 * Try to do so in-place; if this fails, then move the VMA to a new location to
1518 * expand it in-place. in expand_vma()
1526 return vrm->addr; in expand_vma()
1535 if (!(vrm->flags & MREMAP_MAYMOVE)) in expand_vma()
1536 return -ENOMEM; in expand_vma()
1547 * Attempt to resize the VMA in-place; if we cannot, then move the VMA to the
1554 switch (vrm->remap_type) { in mremap_at()
1558 /* NO-OP CASE - resizing to the same size. */ in mremap_at()
1559 return vrm->addr; in mremap_at()
1562 * SHRINK CASE. Can always be done in-place. in mremap_at()
1572 return vrm->addr; in mremap_at()
1579 return -EINVAL; in mremap_at()
1588 if (vrm->remap_type == MREMAP_EXPAND) in vrm_will_map_new()
1600 if (!(vrm->flags & MREMAP_FIXED)) in vrm_move_only()
1603 if (vrm->old_len != vrm->new_len) in vrm_move_only()
1611 struct mm_struct *mm = current->mm; in notify_uffd()
1614 userfaultfd_unmap_complete(mm, vrm->uf_unmap_early); in notify_uffd()
1616 mremap_userfaultfd_fail(vrm->uf); in notify_uffd()
1618 mremap_userfaultfd_complete(vrm->uf, vrm->addr, in notify_uffd()
1619 vrm->new_addr, vrm->old_len); in notify_uffd()
1620 userfaultfd_unmap_complete(mm, vrm->uf_unmap); in notify_uffd()
1625 struct file *file = vma->vm_file; in vma_multi_allowed()
1638 if (!file || !file->f_op->get_unmapped_area) in vma_multi_allowed()
1645 if (file->f_op->get_unmapped_area == thp_get_unmapped_area) in vma_multi_allowed()
1653 struct vm_area_struct *vma = vrm->vma; in check_prep_vma()
1654 struct mm_struct *mm = current->mm; in check_prep_vma()
1655 unsigned long addr = vrm->addr; in check_prep_vma()
1659 return -EFAULT; in check_prep_vma()
1663 return -EPERM; in check_prep_vma()
1667 return -EINVAL; in check_prep_vma()
1670 vrm->remap_type = vrm_remap_type(vrm); in check_prep_vma()
1673 vrm->new_addr = addr; in check_prep_vma()
1679 old_len = vrm->old_len; in check_prep_vma()
1680 new_len = vrm->new_len; in check_prep_vma()
1690 if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) { in check_prep_vma()
1692 current->comm, current->pid); in check_prep_vma()
1693 return -EINVAL; in check_prep_vma()
1696 if ((vrm->flags & MREMAP_DONTUNMAP) && in check_prep_vma()
1697 (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))) in check_prep_vma()
1698 return -EINVAL; in check_prep_vma()
1704 if (vrm->remap_type == MREMAP_SHRINK) in check_prep_vma()
1711 * addr vma->vm_end in check_prep_vma()
1712 * |-----.----------| in check_prep_vma()
1714 * |-----.----------| in check_prep_vma()
1715 * .<--------->xxx> in check_prep_vma()
1718 * We also require that vma->vm_start <= addr < vma->vm_end. in check_prep_vma()
1720 if (old_len > vma->vm_end - addr) in check_prep_vma()
1721 return -EFAULT; in check_prep_vma()
1727 if (vma->vm_flags & VM_LOCKED) in check_prep_vma()
1728 vrm->populate_expand = true; in check_prep_vma()
1731 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; in check_prep_vma()
1732 pgoff += vma->vm_pgoff; in check_prep_vma()
1734 return -EINVAL; in check_prep_vma()
1736 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) in check_prep_vma()
1737 return -EFAULT; in check_prep_vma()
1739 if (!mlock_future_ok(mm, vma->vm_flags, vrm->delta)) in check_prep_vma()
1740 return -EAGAIN; in check_prep_vma()
1742 if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT)) in check_prep_vma()
1743 return -ENOMEM; in check_prep_vma()
1755 unsigned long addr = vrm->addr; in check_mremap_params()
1756 unsigned long flags = vrm->flags; in check_mremap_params()
1760 return -EINVAL; in check_mremap_params()
1762 /* Start address must be page-aligned. */ in check_mremap_params()
1764 return -EINVAL; in check_mremap_params()
1767 * We allow a zero old-len as a special case in check_mremap_params()
1768 * for the DOS-emu "duplicate shm area" thing, but in check_mremap_params()
1769 * a zero new-len is nonsensical. in check_mremap_params()
1771 if (!vrm->new_len) in check_mremap_params()
1772 return -EINVAL; in check_mremap_params()
1775 if (vrm->new_len > TASK_SIZE) in check_mremap_params()
1776 return -EINVAL; in check_mremap_params()
1783 if (vrm->new_addr > TASK_SIZE - vrm->new_len) in check_mremap_params()
1784 return -EINVAL; in check_mremap_params()
1786 /* The new address must be page-aligned. */ in check_mremap_params()
1787 if (offset_in_page(vrm->new_addr)) in check_mremap_params()
1788 return -EINVAL; in check_mremap_params()
1792 return -EINVAL; in check_mremap_params()
1795 if (flags & MREMAP_DONTUNMAP && vrm->old_len != vrm->new_len) in check_mremap_params()
1796 return -EINVAL; in check_mremap_params()
1800 return -EINVAL; in check_mremap_params()
1803 * move_vma() needs us to stay 4 maps below the threshold, otherwise in check_mremap_params()
1807 * state of the VMAs after it gets -ENOMEM. in check_mremap_params()
1808 * So, to avoid such a scenario, we can pre-compute whether the whole in check_mremap_params()
1809 * operation has a high chance of succeeding map-wise. in check_mremap_params()
1810 * The worst-case scenario is when both VMAs (new_addr and old_addr) get in check_mremap_params()
1814 * the threshold; otherwise return -ENOMEM here to be safe. in check_mremap_params()
1816 if ((current->mm->map_count + 2) >= sysctl_max_map_count - 3) in check_mremap_params()
1817 return -ENOMEM; in check_mremap_params()
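Editor's note: the precheck is plain arithmetic on map_count, done before anything is unmapped so userspace never sees a half-completed operation. A numeric illustration, assuming the common default limit of 65530:

/*
 * Illustration (editor's example):
 *   sysctl_max_map_count = 65530, current->mm->map_count = 65525
 *   65525 + 2 = 65527  >=  65530 - 3 = 65527
 * so check_mremap_params() returns -ENOMEM up front instead of risking
 * failure after regions have already been unmapped.
 */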
1825 unsigned long start = vrm->addr; in remap_move()
1826 unsigned long end = vrm->addr + vrm->old_len; in remap_move()
1827 unsigned long new_addr = vrm->new_addr; in remap_move()
1829 unsigned long res = -EFAULT; in remap_move()
1833 VMA_ITERATOR(vmi, current->mm, start); in remap_move()
1842 unsigned long addr = max(vma->vm_start, start); in remap_move()
1843 unsigned long len = min(end, vma->vm_end) - addr; in remap_move()
1848 if (!seen_vma && start < vma->vm_start) in remap_move()
1849 return -EFAULT; in remap_move()
1857 * X Y X Y in remap_move()
1858 * <---> <-> <---> <-> in remap_move()
1859 * |-------| |-----| |-----| |-------| |-----| |-----| in remap_move()
1860 * | A | | B | | C | ---> | A' | | B' | | C' | in remap_move()
1861 * |-------| |-----| |-----| |-------| |-----| |-----| in remap_move()
1864 * So we map B' at A'->vm_end + X, and C' at B'->vm_end + Y. in remap_move()
1866 offset = seen_vma ? vma->vm_start - last_end : 0; in remap_move()
1867 last_end = vma->vm_end; in remap_move()
1869 vrm->vma = vma; in remap_move()
1870 vrm->addr = addr; in remap_move()
1871 vrm->new_addr = target_addr + offset; in remap_move()
1872 vrm->old_len = vrm->new_len = len; in remap_move()
1878 return -EFAULT; in remap_move()
1880 if (vma->vm_end < end) in remap_move()
1881 return -EFAULT; in remap_move()
1896 VM_WARN_ON_ONCE(!vrm->mmap_locked); in remap_move()
1898 VM_WARN_ON_ONCE(vrm->populate_expand); in remap_move()
1900 if (vrm->vmi_needs_invalidate) { in remap_move()
1902 vrm->vmi_needs_invalidate = false; in remap_move()
1905 target_addr = res_vma + vrm->new_len; in remap_move()
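Editor's note: the offset preserves the original gaps between VMAs in a multi-VMA move: each VMA after the first lands at the running target address plus the gap that preceded it. A worked example matching the A/B diagram above (addresses are illustrative):

/*
 * Worked example (illustrative addresses):
 *   A = [0x10000, 0x13000), gap X = 0x1000, B = [0x14000, 0x16000)
 *   initial target_addr = 0x40000
 *
 *   A: !seen_vma -> offset = 0, so A' = [0x40000, 0x43000)
 *      then target_addr = 0x40000 + 0x3000 = 0x43000, last_end = 0x13000
 *   B: offset = vma->vm_start - last_end = 0x14000 - 0x13000 = 0x1000
 *      B' starts at target_addr + offset = 0x43000 + 0x1000 = 0x44000,
 *      so the X-sized gap between A' and B' matches the original.
 */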
1913 struct mm_struct *mm = current->mm; in do_mremap()
1917 vrm->old_len = PAGE_ALIGN(vrm->old_len); in do_mremap()
1918 vrm->new_len = PAGE_ALIGN(vrm->new_len); in do_mremap()
1925 return -EINTR; in do_mremap()
1926 vrm->mmap_locked = true; in do_mremap()
1931 vrm->vma = vma_lookup(current->mm, vrm->addr); in do_mremap()
1943 if (vrm->mmap_locked) in do_mremap()
1947 if (!failed && vrm->populate_expand) in do_mremap()
1948 mm_populate(vrm->new_addr + vrm->old_len, vrm->delta); in do_mremap()
1958 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
1973 * mapping address intact. A non-zero tag will cause the subsequent in SYSCALL_DEFINE5()
1976 * See Documentation/arch/arm64/tagged-address-abi.rst for more in SYSCALL_DEFINE5()