vma.c: changes between commit 65e0aa64df916861ad8579e23f885e56e5ec8647 (old) and commit cc8cb3697a8d8eabe1fb9acb8768b11c1ab607d8 (new). Lines removed in the new version are prefixed '-', lines added are prefixed '+', and unprefixed lines are unchanged context.
// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * VMA-specific functions.
 */

#include "vma_internal.h"
#include "vma.h"

--- 573 unchanged lines hidden ---

		bug = 1;
	}
	VM_BUG_ON_MM(bug, mm);
}
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */

/* Actually perform the VMA merge operation. */
static int commit_merge(struct vma_merge_struct *vmg,
-		struct vm_area_struct *remove)
+		struct vm_area_struct *adjust,
+		struct vm_area_struct *remove,
+		struct vm_area_struct *remove2,
+		long adj_start,
+		bool expanded)
{
	struct vma_prepare vp;

-	init_multi_vma_prep(&vp, vmg->vma, NULL, remove, NULL);
+	init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2);

-	/* Note: vma iterator must be pointing to 'start'. */
-	vma_iter_config(vmg->vmi, vmg->start, vmg->end);
+	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
+		   vp.anon_vma != adjust->anon_vma);
+
+	if (expanded) {
+		/* Note: vma iterator must be pointing to 'start'. */
+		vma_iter_config(vmg->vmi, vmg->start, vmg->end);
+	} else {
+		vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
+				adjust->vm_end);
+	}

	if (vma_iter_prealloc(vmg->vmi, vmg->vma))
		return -ENOMEM;

	vma_prepare(&vp);
-	vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, 0);
+	vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start);
	vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);

-	vma_iter_store(vmg->vmi, vmg->vma);
+	if (expanded)
+		vma_iter_store(vmg->vmi, vmg->vma);

+	if (adj_start) {
+		adjust->vm_start += adj_start;
+		adjust->vm_pgoff += PHYS_PFN(adj_start);
+		if (adj_start < 0) {
+			WARN_ON(expanded);
+			vma_iter_store(vmg->vmi, adjust);
+		}
+	}
+
	vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm);

	return 0;
}

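The effect of the new adj_start parameter is easiest to see with concrete numbers. The sketch below is an editor's illustration, not kernel source; the addresses are made up and 4KiB pages are assumed. It shows the partial merge-right case, where a negative adj_start moves the start of the adjusted VMA backwards:

/*
 * Illustrative sketch only; hypothetical addresses, 4KiB pages assumed.
 *
 * Partial merge-right: vma spans [0x1000, 0x5000), next spans
 * [0x5000, 0x8000), and the modified tail begins at start = 0x3000.
 * The caller picks:
 *
 *	adjust = next;
 *	adj_start = -(vma->vm_end - start);	// -(0x5000 - 0x3000) = -0x2000
 *
 * and commit_merge() then applies:
 *
 *	adjust->vm_start += adj_start;		// next now starts at 0x3000
 *	adjust->vm_pgoff += PHYS_PFN(adj_start); // pgoff moves back 2 pages
 *
 * Because adj_start < 0, the moved 'adjust' VMA must be re-stored in the
 * maple tree explicitly; with a positive adj_start, the store of the
 * expanded VMA already covers the range that 'adjust' gave up.
 */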
+/*
+ * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its
+ * attributes modified.
+ *
+ * @vmg: Describes the modifications being made to a VMA and associated
+ *       metadata.
+ *
+ * When the attributes of a range within a VMA change, then it might be possible
+ * for immediately adjacent VMAs to be merged into that VMA due to having
+ * identical properties.
+ *
+ * This function checks for the existence of any such mergeable VMAs and updates
+ * the maple tree describing the @vmg->vma->vm_mm address space to account for
+ * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge.
+ *
+ * As part of this operation, if a merge occurs, the @vmg object will have its
+ * vma, start, end, and pgoff fields modified to execute the merge. Subsequent
+ * calls to this function should reset these fields.
+ *
+ * Returns: The merged VMA if merge succeeds, or NULL otherwise.
+ *
+ * ASSUMPTIONS:
+ * - The caller must assign the VMA to be modified to @vmg->vma.
+ * - The caller must have set @vmg->prev to the previous VMA, if there is one.
+ * - The caller must not set @vmg->next, as we determine this.
+ * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
+ * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end).
+ */
+static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *vmg)
+{
+	struct vm_area_struct *vma = vmg->vma;
+	struct vm_area_struct *prev = vmg->prev;
+	struct vm_area_struct *next, *res;
+	struct vm_area_struct *anon_dup = NULL;
+	struct vm_area_struct *adjust = NULL;
+	unsigned long start = vmg->start;
+	unsigned long end = vmg->end;
+	bool left_side = vma && start == vma->vm_start;
+	bool right_side = vma && end == vma->vm_end;
+	int err = 0;
+	long adj_start = 0;
+	bool merge_will_delete_vma, merge_will_delete_next;
+	bool merge_left, merge_right, merge_both;
+	bool expanded;
+
+	mmap_assert_write_locked(vmg->mm);
+	VM_WARN_ON(!vma); /* We are modifying a VMA, so caller must specify. */
+	VM_WARN_ON(vmg->next); /* We set this. */
+	VM_WARN_ON(prev && start <= prev->vm_start);
+	VM_WARN_ON(start >= end);
+	/*
+	 * If vma == prev, then we are offset into a VMA. Otherwise, we must
+	 * span a portion of the VMA.
+	 */
+	VM_WARN_ON(vma && ((vma != prev && vmg->start != vma->vm_start) ||
+			   vmg->end > vma->vm_end));
+	/* The vmi must be positioned within vmg->vma. */
+	VM_WARN_ON(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
+			    vma_iter_addr(vmg->vmi) < vma->vm_end));
+
+	vmg->state = VMA_MERGE_NOMERGE;
+
+	/*
+	 * If a special mapping, or if the range being modified is neither at
+	 * the furthermost left nor right side of the VMA, then we have no
+	 * chance of merging and should abort.
+	 */
+	if (vmg->flags & VM_SPECIAL || (!left_side && !right_side))
+		return NULL;
+
+	if (left_side)
+		merge_left = can_vma_merge_left(vmg);
+	else
+		merge_left = false;
+
+	if (right_side) {
+		next = vmg->next = vma_iter_next_range(vmg->vmi);
+		vma_iter_prev_range(vmg->vmi);
+
+		merge_right = can_vma_merge_right(vmg, merge_left);
+	} else {
+		merge_right = false;
+		next = NULL;
+	}
+
+	if (merge_left)		/* If merging prev, position iterator there. */
+		vma_prev(vmg->vmi);
+	else if (!merge_right)	/* If we have nothing to merge, abort. */
+		return NULL;
+
+	merge_both = merge_left && merge_right;
+	/* If we span the entire VMA, a merge implies it will be deleted. */
+	merge_will_delete_vma = left_side && right_side;
+	/*
+	 * If we merge both VMAs, then next is also deleted. This implies
+	 * merge_will_delete_vma also.
+	 */
+	merge_will_delete_next = merge_both;
+
+	/* No matter what happens, we will be adjusting vma. */
+	vma_start_write(vma);
+
+	if (merge_left)
+		vma_start_write(prev);
+
+	if (merge_right)
+		vma_start_write(next);
+
+	if (merge_both) {
+		/*
+		 *         |<----->|
+		 * |-------*********-------|
+		 *   prev     vma     next
+		 *  extend   delete  delete
+		 */
+
+		vmg->vma = prev;
+		vmg->start = prev->vm_start;
+		vmg->end = next->vm_end;
+		vmg->pgoff = prev->vm_pgoff;
+
+		/*
+		 * We already ensured anon_vma compatibility above, so now it's
+		 * simply a case of, if prev has no anon_vma object, which of
+		 * next or vma contains the anon_vma we must duplicate.
+		 */
+		err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup);
+	} else if (merge_left) {
+		/*
+		 *         |<----->| OR
+		 *         |<--------->|
+		 * |-------*************
+		 *   prev       vma
+		 *  extend shrink/delete
+		 */
+
+		vmg->vma = prev;
+		vmg->start = prev->vm_start;
+		vmg->pgoff = prev->vm_pgoff;
+
+		if (merge_will_delete_vma) {
+			/*
+			 * can_vma_merge_after() assumed we would not be
+			 * removing vma, so it skipped the check for
+			 * vm_ops->close, but we are removing vma.
+			 */
+			if (vma->vm_ops && vma->vm_ops->close)
+				err = -EINVAL;
+		} else {
+			adjust = vma;
+			adj_start = vmg->end - vma->vm_start;
+		}
+
+		if (!err)
+			err = dup_anon_vma(prev, vma, &anon_dup);
+	} else { /* merge_right */
+		/*
+		 *     |<----->| OR
+		 * |<--------->|
+		 * *************-------|
+		 *      vma       next
+		 * shrink/delete extend
+		 */
+
+		pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
+
+		VM_WARN_ON(!merge_right);
+		/* If we are offset into a VMA, then prev must be vma. */
+		VM_WARN_ON(vmg->start > vma->vm_start && prev && vma != prev);
+
+		if (merge_will_delete_vma) {
+			vmg->vma = next;
+			vmg->end = next->vm_end;
+			vmg->pgoff = next->vm_pgoff - pglen;
+		} else {
+			/*
+			 * We shrink vma and expand next.
+			 *
+			 * IMPORTANT: This is the ONLY case where the final
+			 * merged VMA is NOT vmg->vma, but rather vmg->next.
+			 */
+
+			vmg->start = vma->vm_start;
+			vmg->end = start;
+			vmg->pgoff = vma->vm_pgoff;
+
+			adjust = next;
+			adj_start = -(vma->vm_end - start);
+		}
+
+		err = dup_anon_vma(next, vma, &anon_dup);
+	}
+
+	if (err)
+		goto abort;
+
+	/*
+	 * In nearly all cases, we expand vmg->vma. There is one exception -
+	 * merge_right where we partially span the VMA. In this case we shrink
+	 * the end of vmg->vma and adjust the start of vmg->next accordingly.
+	 */
+	expanded = !merge_right || merge_will_delete_vma;
+
+	if (commit_merge(vmg, adjust,
+			 merge_will_delete_vma ? vma : NULL,
+			 merge_will_delete_next ? next : NULL,
+			 adj_start, expanded)) {
+		if (anon_dup)
+			unlink_anon_vmas(anon_dup);
+
+		vmg->state = VMA_MERGE_ERROR_NOMEM;
+		return NULL;
+	}
+
+	res = merge_left ? prev : next;
+	khugepaged_enter_vma(res, vmg->flags);
+
+	vmg->state = VMA_MERGE_SUCCESS;
+	return res;
+
+abort:
+	vma_iter_set(vmg->vmi, start);
+	vma_iter_load(vmg->vmi);
+	vmg->state = VMA_MERGE_ERROR_NOMEM;
+	return NULL;
+}
+
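The ASSUMPTIONS list above amounts to a strict calling convention. Here is an editor's sketch of a hypothetical caller, not kernel source: the wrapper name modify_prot_range() and its locals are invented, while the vma_merge_struct fields shown are exactly those the function itself dereferences (mm, vmi, prev, vma, start, end, pgoff, flags):

/* Hypothetical caller sketch; not part of vma.c. */
static struct vm_area_struct *modify_prot_range(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end, unsigned long new_flags)
{
	struct vma_merge_struct vmg = {
		.mm = vma->vm_mm,	/* caller must hold mmap_lock for write */
		.vmi = vmi,		/* positioned within [vma->vm_start, vma->vm_end) */
		.prev = prev,		/* may be NULL; .next is deliberately left unset */
		.vma = vma,		/* the VMA whose attributes are changing */
		.start = start,
		.end = end,
		/* pgoff of 'start' within vma (assumed convention): */
		.pgoff = vma->vm_pgoff + PHYS_PFN(start - vma->vm_start),
		.flags = new_flags,
	};

	/* On success the returned VMA covers at least [start, end). */
	return vma_merge_existing_range(&vmg);
}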
/*
 * vma_merge_new_range - Attempt to merge a new VMA into address space
 *
 * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
 * (exclusive), which we try to merge with any adjacent VMAs if possible.
 *
 * We are about to add a VMA to the address space starting at @vmg->start and
 * ending at @vmg->end. There are three different possible scenarios:
 *

--- 130 unchanged lines hidden ---

	}

	/* Not merging but overwriting any part of next is not handled. */
	VM_WARN_ON(next && !remove_next &&
		   next != vma && vmg->end > next->vm_start);
	/* Only handles expanding */
	VM_WARN_ON(vma->vm_start < vmg->start || vma->vm_end > vmg->end);

-	if (commit_merge(vmg, remove_next ? next : NULL))
+	if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true))
		goto nomem;

	return 0;

nomem:
	vmg->state = VMA_MERGE_ERROR_NOMEM;
	if (anon_dup)
		unlink_anon_vmas(anon_dup);

--- 354 unchanged lines hidden ---

		mmap_write_unlock(mm);
		return 0;
	}

	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
}

-/*
- * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
- * figure out whether that can be merged with its predecessor or its
- * successor.  Or both (it neatly fills a hole).
- *
- * In most cases - when called for mmap, brk or mremap - [addr,end) is
- * certain not to be mapped by the time vma_merge is called; but when
- * called for mprotect, it is certain to be already mapped (either at
- * an offset within prev, or at the start of next), and the flags of
- * this area are about to be changed to vm_flags - and the no-change
- * case has already been eliminated.
- *
- * The following mprotect cases have to be considered, where **** is
- * the area passed down from mprotect_fixup, never extending beyond one
- * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
- * at the same address as **** and is of the same or larger span, and
- * NNNN the next vma after ****:
- *
- *     ****             ****                   ****
- *    PPPPPPNNNNNN    PPPPPPNNNNNN       PPPPPPCCCCCC
- *    cannot merge    might become       might become
- *                    PPNNNNNNNNNN       PPPPPPPPPPCC
- *    mmap, brk or    case 4 below       case 5 below
- *    mremap move:
- *                        ****               ****
- *                    PPPP    NNNN       PPPPCCCCNNNN
- *                    might become       might become
- *                    PPPPPPPPPPPP 1 or  PPPPPPPPPPPP 6 or
- *                    PPPPPPPPNNNN 2 or  PPPPPPPPNNNN 7 or
- *                    PPPPNNNNNNNN 3     PPPPNNNNNNNN 8
- *
- * It is important for case 8 that the vma CCCC overlapping the
- * region **** is never going to be extended over NNNN. Instead NNNN must
- * be extended in region **** and CCCC must be removed. This way in
- * all cases where vma_merge succeeds, the moment vma_merge drops the
- * rmap_locks, the properties of the merged vma will be already
- * correct for the whole merged range. Some of those properties like
- * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
- * be correct for the whole merged range immediately after the
- * rmap_locks are released. Otherwise if NNNN would be removed and
- * CCCC would be extended over the NNNN range, remove_migration_ptes
- * or other rmap walkers (if working on addresses beyond the "end"
- * parameter) may establish ptes with the wrong permissions of CCCC
- * instead of the right permissions of NNNN.
- *
- * In the code below:
- * PPPP is represented by *prev
- * CCCC is represented by *curr or not represented at all (NULL)
- * NNNN is represented by *next or not represented at all (NULL)
- * **** is not represented - it will be merged and the vma containing the
- *      area is returned, or the function will return NULL
- */
-static struct vm_area_struct *vma_merge(struct vma_merge_struct *vmg)
-{
-	struct mm_struct *mm = vmg->mm;
-	struct vm_area_struct *prev = vmg->prev;
-	struct vm_area_struct *curr, *next, *res;
-	struct vm_area_struct *vma, *adjust, *remove, *remove2;
-	struct vm_area_struct *anon_dup = NULL;
-	struct vma_prepare vp;
-	pgoff_t vma_pgoff;
-	int err = 0;
-	bool merge_prev = false;
-	bool merge_next = false;
-	bool vma_expanded = false;
-	unsigned long addr = vmg->start;
-	unsigned long end = vmg->end;
-	unsigned long vma_start = addr;
-	unsigned long vma_end = end;
-	pgoff_t pglen = PHYS_PFN(end - addr);
-	long adj_start = 0;
-
-	vmg->state = VMA_MERGE_NOMERGE;
-
-	/*
-	 * We later require that vma->vm_flags == vm_flags,
-	 * so this tests vma->vm_flags & VM_SPECIAL, too.
-	 */
-	if (vmg->flags & VM_SPECIAL)
-		return NULL;
-
-	/* Does the input range span an existing VMA? (cases 5 - 8) */
-	curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);
-
-	if (!curr ||			/* cases 1 - 4 */
-	    end == curr->vm_end)	/* cases 6 - 8, adjacent VMA */
-		next = vmg->next = vma_lookup(mm, end);
-	else
-		next = vmg->next = NULL;	/* case 5 */
-
-	if (prev) {
-		vma_start = prev->vm_start;
-		vma_pgoff = prev->vm_pgoff;
-
-		/* Can we merge the predecessor? */
-		if (addr == prev->vm_end && can_vma_merge_after(vmg)) {
-			merge_prev = true;
-			vma_prev(vmg->vmi);
-		}
-	}
-
-	/* Can we merge the successor? */
-	if (next && can_vma_merge_before(vmg)) {
-		merge_next = true;
-	}
-
-	/* Verify some invariant that must be enforced by the caller. */
-	VM_WARN_ON(prev && addr <= prev->vm_start);
-	VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
-	VM_WARN_ON(addr >= end);
-
-	if (!merge_prev && !merge_next)
-		return NULL;	/* Not mergeable. */
-
-	if (merge_prev)
-		vma_start_write(prev);
-
-	res = vma = prev;
-	remove = remove2 = adjust = NULL;
-
-	/* Can we merge both the predecessor and the successor? */
-	if (merge_prev && merge_next &&
-	    is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
-		vma_start_write(next);
-		remove = next;				/* case 1 */
-		vma_end = next->vm_end;
-		err = dup_anon_vma(prev, next, &anon_dup);
-		if (curr) {				/* case 6 */
-			vma_start_write(curr);
-			remove = curr;
-			remove2 = next;
-			/*
-			 * Note that the dup_anon_vma below cannot overwrite err
-			 * since the first caller would do nothing unless next
-			 * has an anon_vma.
-			 */
-			if (!next->anon_vma)
-				err = dup_anon_vma(prev, curr, &anon_dup);
-		}
-	} else if (merge_prev) {			/* case 2 */
-		if (curr) {
-			vma_start_write(curr);
-			if (end == curr->vm_end) {	/* case 7 */
-				/*
-				 * can_vma_merge_after() assumed we would not be
-				 * removing prev vma, so it skipped the check
-				 * for vm_ops->close, but we are removing curr
-				 */
-				if (curr->vm_ops && curr->vm_ops->close)
-					err = -EINVAL;
-				remove = curr;
-			} else {			/* case 5 */
-				adjust = curr;
-				adj_start = (end - curr->vm_start);
-			}
-			if (!err)
-				err = dup_anon_vma(prev, curr, &anon_dup);
-		}
-	} else { /* merge_next */
-		vma_start_write(next);
-		res = next;
-		if (prev && addr < prev->vm_end) {	/* case 4 */
-			vma_start_write(prev);
-			vma_end = addr;
-			adjust = next;
-			adj_start = -(prev->vm_end - addr);
-			err = dup_anon_vma(next, prev, &anon_dup);
-		} else {
-			/*
-			 * Note that cases 3 and 8 are the ONLY ones where prev
-			 * is permitted to be (but is not necessarily) NULL.
-			 */
-			vma = next;			/* case 3 */
-			vma_start = addr;
-			vma_end = next->vm_end;
-			vma_pgoff = next->vm_pgoff - pglen;
-			if (curr) {			/* case 8 */
-				vma_pgoff = curr->vm_pgoff;
-				vma_start_write(curr);
-				remove = curr;
-				err = dup_anon_vma(next, curr, &anon_dup);
-			}
-		}
-	}
-
-	/* Error in anon_vma clone. */
-	if (err)
-		goto anon_vma_fail;
-
-	if (vma_start < vma->vm_start || vma_end > vma->vm_end)
-		vma_expanded = true;
-
-	if (vma_expanded) {
-		vma_iter_config(vmg->vmi, vma_start, vma_end);
-	} else {
-		vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
-				adjust->vm_end);
-	}
-
-	if (vma_iter_prealloc(vmg->vmi, vma))
-		goto prealloc_fail;
-
-	init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
-	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
-		   vp.anon_vma != adjust->anon_vma);
-
-	vma_prepare(&vp);
-	vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
-	vma_set_range(vma, vma_start, vma_end, vma_pgoff);
-
-	if (vma_expanded)
-		vma_iter_store(vmg->vmi, vma);
-
-	if (adj_start) {
-		adjust->vm_start += adj_start;
-		adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
-		if (adj_start < 0) {
-			WARN_ON(vma_expanded);
-			vma_iter_store(vmg->vmi, next);
-		}
-	}
-
-	vma_complete(&vp, vmg->vmi, mm);
-	validate_mm(mm);
-	khugepaged_enter_vma(res, vmg->flags);
-
-	vmg->state = VMA_MERGE_SUCCESS;
-	return res;
-
-prealloc_fail:
-	vmg->state = VMA_MERGE_ERROR_NOMEM;
-	if (anon_dup)
-		unlink_anon_vmas(anon_dup);
-
-anon_vma_fail:
-	if (err == -ENOMEM)
-		vmg->state = VMA_MERGE_ERROR_NOMEM;
-
-	vma_iter_set(vmg->vmi, addr);
-	vma_iter_load(vmg->vmi);
-	return NULL;
-}
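Case 5 in the table above is the one place where vma_merge() produced a positive adj_start, and it mirrors the negative case illustrated after commit_merge(). Another editor's sketch, with made-up addresses and 4KiB pages assumed:

/*
 * Illustrative sketch only; hypothetical addresses, 4KiB pages assumed.
 *
 * Case 5: prev spans [0x0, 0x2000), curr spans [0x2000, 0x6000), and
 * mprotect changes [0x2000, 0x4000) to prev's flags (so end = 0x4000):
 *
 *	adjust = curr;
 *	adj_start = end - curr->vm_start;	// 0x4000 - 0x2000 = +0x2000
 *	...
 *	adjust->vm_start += adj_start;		// curr now starts at 0x4000
 *	adjust->vm_pgoff += adj_start >> PAGE_SHIFT;	// pgoff advances 2 pages
 *
 * prev is then expanded over [0x0, 0x4000); because vma_expanded is true,
 * storing the expanded prev in the maple tree also covers the range that
 * curr gave up, so no separate store of 'adjust' is needed.
 */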

/*
 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd
 * context and anonymous VMA name within the range [start, end).
 *
 * As a result, we might be able to merge the newly modified VMA range with an
 * adjacent VMA with identical properties.
 *
 * If no merge is possible and the range does not span the entirety of the VMA,
 * we then need to split the VMA to accommodate the change.
 *
 * The function returns either the merged VMA, the original VMA if a split was
 * required instead, or an error if the split failed.
 */
static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
{
	struct vm_area_struct *vma = vmg->vma;
	struct vm_area_struct *merged;

	/* First, try to merge. */
-	merged = vma_merge(vmg);
+	merged = vma_merge_existing_range(vmg);
	if (merged)
		return merged;

	/* Split any preceding portion of the VMA. */
	if (vma->vm_start < vmg->start) {
		int err = split_vma(vmg->vmi, vma, vmg->start, 1);

		if (err)

--- 641 unchanged lines hidden ---
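Finally, vma_modify()'s merge-then-split flow, summarised as an editor's sketch; the trailing-split step is inferred from the doc comment, since the remainder of the function is hidden above:

/*
 * Illustrative control flow only; not kernel source.
 *
 *	merged = vma_merge_existing_range(vmg);
 *	if (merged)
 *		return merged;		// a neighbour absorbed [start, end)
 *
 *	if (vma->vm_start < vmg->start)
 *		split_vma(vmg->vmi, vma, vmg->start, 1);	// detach head
 *
 *	// Per the doc comment, any trailing portion beyond vmg->end is
 *	// presumably split off likewise, leaving a VMA covering exactly
 *	// [vmg->start, vmg->end) for the caller to modify.
 */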