vma.c (before: fc21959f74bc1138b28e90a02ec224ab8626111e) | vma.c (after: cacded5e42b9609b07b22d80c10f0076d439f7d1) |
---|---|
1// SPDX-License-Identifier: GPL-2.0-or-later 2 3/* 4 * VMA-specific functions. 5 */ 6 7#include "vma_internal.h" 8#include "vma.h" --- 41 unchanged lines hidden (view full) --- 50 * parents. This can improve scalability caused by anon_vma lock. 51 */ 52 if ((!anon_vma1 || !anon_vma2) && (!vma || 53 list_is_singular(&vma->anon_vma_chain))) 54 return true; 55 return anon_vma1 == anon_vma2; 56} 57 | 1// SPDX-License-Identifier: GPL-2.0-or-later 2 3/* 4 * VMA-specific functions. 5 */ 6 7#include "vma_internal.h" 8#include "vma.h" --- 41 unchanged lines hidden (view full) --- 50 * parents. This can improve scalability caused by anon_vma lock. 51 */ 52 if ((!anon_vma1 || !anon_vma2) && (!vma || 53 list_is_singular(&vma->anon_vma_chain))) 54 return true; 55 return anon_vma1 == anon_vma2; 56} 57 |
58/* Are the anon_vma's belonging to each VMA compatible with one another? */ 59static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1, 60 struct vm_area_struct *vma2) 61{ 62 return is_mergeable_anon_vma(vma1->anon_vma, vma2->anon_vma, NULL); 63} 64 |
|
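The helpers above reduce anon_vma compatibility to a NULL-tolerant pointer comparison: if either VMA has no anon_vma yet, the pair is trivially mergeable (the merged VMA can simply adopt the non-NULL one); otherwise both must point at the same object. A minimal userspace sketch of just that rule, using a toy struct in place of the kernel's anon_vma and omitting the list_is_singular() refinement:

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Toy stand-in for the kernel's anon_vma; only identity matters here. */
struct toy_anon_vma { int id; };

/*
 * Mirrors the tail of is_mergeable_anon_vma(): a missing anon_vma on
 * either side is compatible with anything, otherwise both sides must
 * share the same object.
 */
static bool toy_anon_vmas_compatible(struct toy_anon_vma *a,
				     struct toy_anon_vma *b)
{
	if (!a || !b)
		return true;
	return a == b;
}

int main(void)
{
	struct toy_anon_vma x = { 1 }, y = { 2 };

	assert(toy_anon_vmas_compatible(NULL, &x));	/* unfaulted side */
	assert(toy_anon_vmas_compatible(&x, &x));	/* same object */
	assert(!toy_anon_vmas_compatible(&x, &y));	/* distinct objects */
	return 0;
}
```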
58/* 59 * init_multi_vma_prep() - Initializer for struct vma_prepare 60 * @vp: The vma_prepare struct 61 * @vma: The vma that will be altered once locked 62 * @next: The next vma if it is to be adjusted 63 * @remove: The first vma to be removed 64 * @remove2: The second vma to be removed 65 */ --- 60 unchanged lines hidden (view full) --- 126 is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) { 127 if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff) 128 return true; 129 } 130 return false; 131} 132 133/* | 65/* 66 * init_multi_vma_prep() - Initializer for struct vma_prepare 67 * @vp: The vma_prepare struct 68 * @vma: The vma that will be altered once locked 69 * @next: The next vma if it is to be adjusted 70 * @remove: The first vma to be removed 71 * @remove2: The second vma to be removed 72 */ --- 60 unchanged lines hidden (view full) --- 133 is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) { 134 if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff) 135 return true; 136 } 137 return false; 138} 139 140/* |
141 * Can the proposed VMA be merged with the left (previous) VMA taking into 142 * account the start position of the proposed range. 143 */ 144static bool can_vma_merge_left(struct vma_merge_struct *vmg) 145 146{ 147 return vmg->prev && vmg->prev->vm_end == vmg->start && 148 can_vma_merge_after(vmg); 149} 150 151/* 152 * Can the proposed VMA be merged with the right (next) VMA taking into 153 * account the end position of the proposed range. 154 * 155 * In addition, if we can merge with the left VMA, ensure that left and right 156 * anon_vma's are also compatible. 157 */ 158static bool can_vma_merge_right(struct vma_merge_struct *vmg, 159 bool can_merge_left) 160{ 161 if (!vmg->next || vmg->end != vmg->next->vm_start || 162 !can_vma_merge_before(vmg)) 163 return false; 164 165 if (!can_merge_left) 166 return true; 167 168 /* 169 * If we can merge with prev (left) and next (right), indicating that 170 * each VMA's anon_vma is compatible with the proposed anon_vma, this 171 * does not mean prev and next are compatible with EACH OTHER. 172 * 173 * We therefore check this in addition to mergeability to either side. 174 */ 175 return are_anon_vmas_compatible(vmg->prev, vmg->next); 176} 177 178/* |
|
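Behind can_vma_merge_left()/can_vma_merge_right() above sits the file-offset contiguity test shown earlier (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff), plus, for a both-sides merge, the extra requirement that prev and next be compatible with each other. A worked example of the contiguity arithmetic with made-up numbers, assuming 4 KiB pages:

```c
#include <assert.h>

#define TOY_PAGE_SHIFT 12	/* assumed 4 KiB pages, illustration only */

int main(void)
{
	/* Hypothetical prev VMA: maps file pages [10, 14). */
	unsigned long prev_vm_pgoff = 10;
	unsigned long prev_bytes = 4ul << TOY_PAGE_SHIFT;
	unsigned long prev_pages = prev_bytes >> TOY_PAGE_SHIFT; /* vma_pages() */

	/* Proposed range: its backing must start at file page 14 ... */
	unsigned long vmg_pgoff = 14;

	/* ... i.e. continue the file exactly where prev leaves off. */
	assert(prev_vm_pgoff + prev_pages == vmg_pgoff);

	/* One page off in either direction and the offsets diverge. */
	assert(prev_vm_pgoff + prev_pages != vmg_pgoff + 1);
	return 0;
}
```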
134 * Close a vm structure and free it. 135 */ 136void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed) 137{ 138 might_sleep(); 139 if (!closed && vma->vm_ops && vma->vm_ops->close) 140 vma->vm_ops->close(vma); 141 if (vma->vm_file) --- 318 unchanged lines hidden (view full) --- 460 pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i); 461 bug = 1; 462 } 463 VM_BUG_ON_MM(bug, mm); 464} 465#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ 466 467/* | 179 * Close a vm structure and free it. 180 */ 181void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed) 182{ 183 might_sleep(); 184 if (!closed && vma->vm_ops && vma->vm_ops->close) 185 vma->vm_ops->close(vma); 186 if (vma->vm_file) --- 318 unchanged lines hidden (view full) --- 505 pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i); 506 bug = 1; 507 } 508 VM_BUG_ON_MM(bug, mm); 509} 510#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ 511 512/* |
513 * vma_merge_new_range - Attempt to merge a new VMA into address space 514 * 515 * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end 516 * (exclusive), which we try to merge with any adjacent VMAs if possible. 517 * 518 * We are about to add a VMA to the address space starting at @vmg->start and 519 * ending at @vmg->end. There are three different possible scenarios: 520 * 521 * 1. There is a VMA with identical properties immediately adjacent to the 522 * proposed new VMA [@vmg->start, @vmg->end) either before or after it - 523 * EXPAND that VMA: 524 * 525 * Proposed: |-----| or |-----| 526 * Existing: |----| |----| 527 * 528 * 2. There are VMAs with identical properties immediately adjacent to the 529 * proposed new VMA [@vmg->start, @vmg->end) both before AND after it - 530 * EXPAND the former and REMOVE the latter: 531 * 532 * Proposed: |-----| 533 * Existing: |----| |----| 534 * 535 * 3. There are no VMAs immediately adjacent to the proposed new VMA or those 536 * VMAs do not have identical attributes - NO MERGE POSSIBLE. 537 * 538 * In instances where we can merge, this function returns the expanded VMA which 539 * will have its range adjusted accordingly and the underlying maple tree also 540 * adjusted. 541 * 542 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer 543 * to the VMA we expanded. 544 * 545 * This function adjusts @vmg to provide @vmg->next if not already specified, 546 * and adjusts [@vmg->start, @vmg->end) to span the expanded range. 547 * 548 * ASSUMPTIONS: 549 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock. 550 * - The caller must have determined that [@vmg->start, @vmg->end) is empty, 551 other than VMAs that will be unmapped should the operation succeed. 552 * - The caller must have specified the previous vma in @vmg->prev. 553 * - The caller must have specified the next vma in @vmg->next. 554 * - The caller must have positioned the vmi at or before the gap. 555 */ 556struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) 557{ 558 struct vm_area_struct *prev = vmg->prev; 559 struct vm_area_struct *next = vmg->next; 560 unsigned long start = vmg->start; 561 unsigned long end = vmg->end; 562 pgoff_t pgoff = vmg->pgoff; 563 pgoff_t pglen = PHYS_PFN(end - start); 564 bool can_merge_left, can_merge_right; 565 566 mmap_assert_write_locked(vmg->mm); 567 VM_WARN_ON(vmg->vma); 568 /* vmi must point at or before the gap. */ 569 VM_WARN_ON(vma_iter_addr(vmg->vmi) > end); 570 571 vmg->state = VMA_MERGE_NOMERGE; 572 573 /* Special VMAs are unmergeable, also if no prev/next. */ 574 if ((vmg->flags & VM_SPECIAL) || (!prev && !next)) 575 return NULL; 576 577 can_merge_left = can_vma_merge_left(vmg); 578 can_merge_right = can_vma_merge_right(vmg, can_merge_left); 579 580 /* If we can merge with the next VMA, adjust vmg accordingly. */ 581 if (can_merge_right) { 582 vmg->end = next->vm_end; 583 vmg->vma = next; 584 vmg->pgoff = next->vm_pgoff - pglen; 585 } 586 587 /* If we can merge with the previous VMA, adjust vmg accordingly. */ 588 if (can_merge_left) { 589 vmg->start = prev->vm_start; 590 vmg->vma = prev; 591 vmg->pgoff = prev->vm_pgoff; 592 593 vma_prev(vmg->vmi); /* Equivalent to going to the previous range */ 594 } 595 596 /* 597 * Now try to expand adjacent VMA(s). This takes care of removing the 598 * following VMA if we have VMAs on both sides. 
599 */ 600 if (vmg->vma && !vma_expand(vmg)) { 601 khugepaged_enter_vma(vmg->vma, vmg->flags); 602 vmg->state = VMA_MERGE_SUCCESS; 603 return vmg->vma; 604 } 605 606 /* If expansion failed, reset state. Allows us to retry merge later. */ 607 vmg->vma = NULL; 608 vmg->start = start; 609 vmg->end = end; 610 vmg->pgoff = pgoff; 611 if (vmg->vma == prev) 612 vma_iter_set(vmg->vmi, start); 613 614 return NULL; 615} 616 617/* |
|
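The three scenarios documented above map onto plain interval arithmetic. A self-contained toy model, where a single tag field stands in for everything the real checks compare (flags, file, offsets, anon_vma) and removal is modelled by emptying the absorbed interval; like the real function it returns the expanded VMA on merge and NULL otherwise:

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Toy VMA: a half-open interval plus a merge-compatibility tag. */
struct toy_vma { unsigned long start, end, tag; };

static bool compatible(const struct toy_vma *a, const struct toy_vma *b)
{
	return a && b && a->tag == b->tag;
}

/*
 * Toy analogue of vma_merge_new_range() for a new range [start, end)
 * with tag @tag, between @prev and @next (either may be NULL).
 */
static struct toy_vma *toy_merge_new_range(struct toy_vma *prev,
					   struct toy_vma *next,
					   unsigned long start,
					   unsigned long end,
					   unsigned long tag)
{
	struct toy_vma key = { start, end, tag };
	bool left = prev && prev->end == start && compatible(prev, &key);
	bool right = next && next->start == end && compatible(next, &key);

	/*
	 * Scenario 2: mergeable on both sides; as with can_vma_merge_right(),
	 * prev and next must also be compatible with EACH OTHER.
	 */
	if (left && right && compatible(prev, next)) {
		prev->end = next->end;		/* expand prev ... */
		next->start = next->end = 0;	/* ... and drop next */
		return prev;
	}
	if (left) {				/* scenario 1, left */
		prev->end = end;
		return prev;
	}
	if (right) {				/* scenario 1, right */
		next->start = start;
		return next;
	}
	return NULL;				/* scenario 3: no merge */
}

int main(void)
{
	struct toy_vma a = { 0x1000, 0x2000, 1 }, b = { 0x3000, 0x4000, 1 };

	/* Filling the gap [0x2000, 0x3000) merges all three into one. */
	assert(toy_merge_new_range(&a, &b, 0x2000, 0x3000, 1) == &a);
	assert(a.start == 0x1000 && a.end == 0x4000);
	return 0;
}
```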
468 * vma_expand - Expand an existing VMA 469 * 470 * @vmg: Describes a VMA expansion operation. 471 * 472 * Expand @vma to vmg->start and vmg->end. Can expand off the start and end. 473 * Will expand over vmg->next if it's different from vmg->vma and vmg->end == 474 * vmg->next->vm_end. Checking if the vmg->vma can expand and merge with 475 * vmg->next needs to be handled by the caller. 476 * | 618 * vma_expand - Expand an existing VMA 619 * 620 * @vmg: Describes a VMA expansion operation. 621 * 622 * Expand @vma to vmg->start and vmg->end. Can expand off the start and end. 623 * Will expand over vmg->next if it's different from vmg->vma and vmg->end == 624 * vmg->next->vm_end. Checking if the vmg->vma can expand and merge with 625 * vmg->next needs to be handled by the caller. 626 * |
477 * Returns: 0 on success | 627 * Returns: 0 on success. 628 * 629 * ASSUMPTIONS: 630 * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock. 631 * - The caller must have set @vmg->vma and @vmg->next. |
478 */ 479int vma_expand(struct vma_merge_struct *vmg) 480{ 481 struct vm_area_struct *anon_dup = NULL; 482 bool remove_next = false; 483 struct vm_area_struct *vma = vmg->vma; 484 struct vm_area_struct *next = vmg->next; 485 struct vma_prepare vp; 486 | 632 */ 633int vma_expand(struct vma_merge_struct *vmg) 634{ 635 struct vm_area_struct *anon_dup = NULL; 636 bool remove_next = false; 637 struct vm_area_struct *vma = vmg->vma; 638 struct vm_area_struct *next = vmg->next; 639 struct vma_prepare vp; 640 |
641 mmap_assert_write_locked(vmg->mm); 642 |
|
487 vma_start_write(vma); 488 if (next && (vma != next) && (vmg->end == next->vm_end)) { 489 int ret; 490 491 remove_next = true; 492 vma_start_write(next); 493 ret = dup_anon_vma(vma, next, &anon_dup); 494 if (ret) --- 16 unchanged lines hidden (view full) --- 511 vma_adjust_trans_huge(vma, vmg->start, vmg->end, 0); 512 vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff); 513 vma_iter_store(vmg->vmi, vma); 514 515 vma_complete(&vp, vmg->vmi, vma->vm_mm); 516 return 0; 517 518nomem: | 643 vma_start_write(vma); 644 if (next && (vma != next) && (vmg->end == next->vm_end)) { 645 int ret; 646 647 remove_next = true; 648 vma_start_write(next); 649 ret = dup_anon_vma(vma, next, &anon_dup); 650 if (ret) --- 16 unchanged lines hidden (view full) --- 667 vma_adjust_trans_huge(vma, vmg->start, vmg->end, 0); 668 vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff); 669 vma_iter_store(vmg->vmi, vma); 670 671 vma_complete(&vp, vmg->vmi, vma->vm_mm); 672 return 0; 673 674nomem: |
675 vmg->state = VMA_MERGE_ERROR_NOMEM; |
|
519 if (anon_dup) 520 unlink_anon_vmas(anon_dup); 521 return -ENOMEM; 522} 523 524/* 525 * vma_shrink() - Reduce an existing VMAs memory area 526 * @vmi: The vma iterator --- 497 unchanged lines hidden (view full) --- 1024 bool vma_expanded = false; 1025 unsigned long addr = vmg->start; 1026 unsigned long end = vmg->end; 1027 unsigned long vma_start = addr; 1028 unsigned long vma_end = end; 1029 pgoff_t pglen = PHYS_PFN(end - addr); 1030 long adj_start = 0; 1031 | 676 if (anon_dup) 677 unlink_anon_vmas(anon_dup); 678 return -ENOMEM; 679} 680 681/* 682 * vma_shrink() - Reduce an existing VMAs memory area 683 * @vmi: The vma iterator --- 497 unchanged lines hidden (view full) --- 1181 bool vma_expanded = false; 1182 unsigned long addr = vmg->start; 1183 unsigned long end = vmg->end; 1184 unsigned long vma_start = addr; 1185 unsigned long vma_end = end; 1186 pgoff_t pglen = PHYS_PFN(end - addr); 1187 long adj_start = 0; 1188 |
1189 vmg->state = VMA_MERGE_NOMERGE; 1190 |
|
1032 /* 1033 * We later require that vma->vm_flags == vm_flags, 1034 * so this tests vma->vm_flags & VM_SPECIAL, too. 1035 */ 1036 if (vmg->flags & VM_SPECIAL) 1037 return NULL; 1038 1039 /* Does the input range span an existing VMA? (cases 5 - 8) */ --- 135 unchanged lines hidden (view full) --- 1175 WARN_ON(vma_expanded); 1176 vma_iter_store(vmg->vmi, next); 1177 } 1178 } 1179 1180 vma_complete(&vp, vmg->vmi, mm); 1181 validate_mm(mm); 1182 khugepaged_enter_vma(res, vmg->flags); | 1191 /* 1192 * We later require that vma->vm_flags == vm_flags, 1193 * so this tests vma->vm_flags & VM_SPECIAL, too. 1194 */ 1195 if (vmg->flags & VM_SPECIAL) 1196 return NULL; 1197 1198 /* Does the input range span an existing VMA? (cases 5 - 8) */ --- 135 unchanged lines hidden (view full) --- 1334 WARN_ON(vma_expanded); 1335 vma_iter_store(vmg->vmi, next); 1336 } 1337 } 1338 1339 vma_complete(&vp, vmg->vmi, mm); 1340 validate_mm(mm); 1341 khugepaged_enter_vma(res, vmg->flags); |
1342 1343 vmg->state = VMA_MERGE_SUCCESS; |
|
1183 return res; 1184 1185prealloc_fail: | 1344 return res; 1345 1346prealloc_fail: |
1347 vmg->state = VMA_MERGE_ERROR_NOMEM; |
|
1186 if (anon_dup) 1187 unlink_anon_vmas(anon_dup); 1188 1189anon_vma_fail: | 1348 if (anon_dup) 1349 unlink_anon_vmas(anon_dup); 1350 1351anon_vma_fail: |
1352 if (err == -ENOMEM) 1353 vmg->state = VMA_MERGE_ERROR_NOMEM; 1354 |
|
1190 vma_iter_set(vmg->vmi, addr); 1191 vma_iter_load(vmg->vmi); 1192 return NULL; 1193} 1194 1195/* 1196 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd 1197 * context and anonymous VMA name within the range [start, end). --- 91 unchanged lines hidden (view full) --- 1289 1290 vmg.flags = new_flags; 1291 vmg.uffd_ctx = new_ctx; 1292 1293 return vma_modify(&vmg); 1294} 1295 1296/* | 1355 vma_iter_set(vmg->vmi, addr); 1356 vma_iter_load(vmg->vmi); 1357 return NULL; 1358} 1359 1360/* 1361 * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd 1362 * context and anonymous VMA name within the range [start, end). --- 91 unchanged lines hidden (view full) --- 1454 1455 vmg.flags = new_flags; 1456 vmg.uffd_ctx = new_ctx; 1457 1458 return vma_modify(&vmg); 1459} 1460 1461/* |
1297 * Attempt to merge a newly mapped VMA with those adjacent to it. The caller 1298 * must ensure that [start, end) does not overlap any existing VMA. 1299 */ 1300struct vm_area_struct 1301*vma_merge_new_vma(struct vma_iterator *vmi, struct vm_area_struct *prev, 1302 struct vm_area_struct *vma, unsigned long start, 1303 unsigned long end, pgoff_t pgoff) 1304{ 1305 VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); 1306 1307 vmg.pgoff = pgoff; 1308 1309 return vma_merge(&vmg); 1310} 1311 1312/* | |
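With vma_merge_new_vma() removed (the old helper shown above), its former callers seed a vma_merge_struct from an existing VMA, clear vmg->vma, look up vmg->next themselves, and call vma_merge_new_range() directly, as the vma_merge_extend() and copy_vma() hunks below show. A hedged, runnable toy of that call-site shape; every toy_* name here is invented for illustration and none of this is the kernel API:

```c
#include <assert.h>
#include <stddef.h>

/* Opaque stand-ins for the kernel types involved. */
struct toy_vma { int dummy; };
struct toy_iter { struct toy_vma *next; };

/* Toy descriptor mirroring the vmg fields used at the new call sites. */
struct toy_vmg {
	struct toy_vma *vma;	/* NULL => "merge a new range" semantics */
	struct toy_vma *next;
};

/* Stub of vma_iter_next_rewind(): hand back the next VMA, if any. */
static struct toy_vma *toy_iter_next_rewind(struct toy_iter *it)
{
	return it->next;
}

/* Stub merge: succeeds only when asked for new-range semantics. */
static struct toy_vma *toy_merge_new_range(struct toy_vmg *vmg)
{
	return vmg->vma ? NULL : vmg->next;
}

int main(void)
{
	struct toy_vma next = { 0 };
	struct toy_iter it = { &next };
	struct toy_vmg vmg = { &next, NULL };

	/*
	 * The pattern from the new call sites: the existing VMA only
	 * seeded the descriptor's other fields, so clear ->vma, fetch
	 * ->next via the iterator, then attempt the merge.
	 */
	vmg.vma = NULL;
	vmg.next = toy_iter_next_rewind(&it);
	assert(toy_merge_new_range(&vmg) == &next);
	return 0;
}
```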
1313 * Expand vma by delta bytes, potentially merging with an immediately adjacent 1314 * VMA with identical properties. 1315 */ 1316struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi, 1317 struct vm_area_struct *vma, 1318 unsigned long delta) 1319{ 1320 VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta); 1321 | 1462 * Expand vma by delta bytes, potentially merging with an immediately adjacent 1463 * VMA with identical properties. 1464 */ 1465struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi, 1466 struct vm_area_struct *vma, 1467 unsigned long delta) 1468{ 1469 VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta); 1470 |
1322 /* vma is specified as prev, so case 1 or 2 will apply. */ 1323 return vma_merge(&vmg); | 1471 vmg.next = vma_iter_next_rewind(vmi, NULL); 1472 vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */ 1473 1474 return vma_merge_new_range(&vmg); |
1324} 1325 1326void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb) 1327{ 1328 vb->count = 0; 1329} 1330 1331static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb) --- 84 unchanged lines hidden (view full) --- 1416 */ 1417struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, 1418 unsigned long addr, unsigned long len, pgoff_t pgoff, 1419 bool *need_rmap_locks) 1420{ 1421 struct vm_area_struct *vma = *vmap; 1422 unsigned long vma_start = vma->vm_start; 1423 struct mm_struct *mm = vma->vm_mm; | 1475} 1476 1477void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb) 1478{ 1479 vb->count = 0; 1480} 1481 1482static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb) --- 84 unchanged lines hidden (view full) --- 1567 */ 1568struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, 1569 unsigned long addr, unsigned long len, pgoff_t pgoff, 1570 bool *need_rmap_locks) 1571{ 1572 struct vm_area_struct *vma = *vmap; 1573 unsigned long vma_start = vma->vm_start; 1574 struct mm_struct *mm = vma->vm_mm; |
1424 struct vm_area_struct *new_vma, *prev; | 1575 struct vm_area_struct *new_vma; |
1425 bool faulted_in_anon_vma = true; 1426 VMA_ITERATOR(vmi, mm, addr); | 1576 bool faulted_in_anon_vma = true; 1577 VMA_ITERATOR(vmi, mm, addr); |
1578 VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len); |
|
1427 1428 /* 1429 * If anonymous vma has not yet been faulted, update new pgoff 1430 * to match new location, to increase its chance of merging. 1431 */ 1432 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { 1433 pgoff = addr >> PAGE_SHIFT; 1434 faulted_in_anon_vma = false; 1435 } 1436 | 1579 1580 /* 1581 * If anonymous vma has not yet been faulted, update new pgoff 1582 * to match new location, to increase its chance of merging. 1583 */ 1584 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { 1585 pgoff = addr >> PAGE_SHIFT; 1586 faulted_in_anon_vma = false; 1587 } 1588 |
1437 new_vma = find_vma_prev(mm, addr, &prev); | 1589 new_vma = find_vma_prev(mm, addr, &vmg.prev); |
1438 if (new_vma && new_vma->vm_start < addr + len) 1439 return NULL; /* should never get here */ 1440 | 1590 if (new_vma && new_vma->vm_start < addr + len) 1591 return NULL; /* should never get here */ 1592 |
1441 new_vma = vma_merge_new_vma(&vmi, prev, vma, addr, addr + len, pgoff); | 1593 vmg.vma = NULL; /* New VMA range. */ 1594 vmg.pgoff = pgoff; 1595 vmg.next = vma_iter_next_rewind(&vmi, NULL); 1596 new_vma = vma_merge_new_range(&vmg); 1597 |
1442 if (new_vma) { 1443 /* 1444 * Source vma may have been merged into new_vma 1445 */ 1446 if (unlikely(vma_start >= new_vma->vm_start && 1447 vma_start < new_vma->vm_end)) { 1448 /* 1449 * The only way we can get a vma_merge with --- 423 unchanged lines hidden --- | 1598 if (new_vma) { 1599 /* 1600 * Source vma may have been merged into new_vma 1601 */ 1602 if (unlikely(vma_start >= new_vma->vm_start && 1603 vma_start < new_vma->vm_end)) { 1604 /* 1605 * The only way we can get a vma_merge with --- 423 unchanged lines hidden --- |
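One step in copy_vma() above deserves a concrete illustration: an anonymous VMA that has never faulted has no pages bound to its old offset, so copy_vma() re-derives pgoff from the destination address (pgoff = addr >> PAGE_SHIFT) to maximise the relocated copy's chance of merging. A hedged arithmetic sketch, assuming 4 KiB pages and an invented address:

```c
#include <assert.h>

#define TOY_PAGE_SHIFT 12	/* assumed 4 KiB pages, illustration only */

int main(void)
{
	/* Hypothetical destination for the copied anonymous VMA. */
	unsigned long addr = 0x345000UL;
	int anon_vma_faulted = 0;	/* i.e. vma->anon_vma == NULL */
	unsigned long pgoff = 999;	/* stale value from the old location */

	/*
	 * Mirrors the copy_vma() logic: an unfaulted anonymous VMA is
	 * free to take whatever offset makes it look contiguous with
	 * its new neighbours.
	 */
	if (!anon_vma_faulted)
		pgoff = addr >> TOY_PAGE_SHIFT;

	assert(pgoff == 0x345);
	return 0;
}
```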